forked from TrueCloudLab/rclone

vendor: update all dependencies

parent 3dcf1e61cf
commit 479c803fd9

446 changed files with 25054 additions and 8064 deletions
vendor/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored; 3 lines changed)

@@ -12,6 +12,7 @@ import (
type Config struct {
    Config *aws.Config
    Handlers request.Handlers
    PartitionID string
    Endpoint string
    SigningRegion string
    SigningName string

@@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
    default:
        maxRetries := aws.IntValue(cfg.MaxRetries)
        if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
            maxRetries = 3
            maxRetries = DefaultRetryerMaxNumRetries
        }
        svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
    }
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (generated, vendored; 130 lines changed)
@ -1,6 +1,7 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
|
@ -9,69 +10,142 @@ import (
|
|||
)
|
||||
|
||||
// DefaultRetryer implements basic retry logic using exponential backoff for
|
||||
// most services. If you want to implement custom retry logic, implement the
|
||||
// request.Retryer interface or create a structure type that composes this
|
||||
// struct and override the specific methods. For example, to override only
|
||||
// the MaxRetries method:
|
||||
// most services. If you want to implement custom retry logic, you can implement the
|
||||
// request.Retryer interface.
|
||||
//
|
||||
// type retryer struct {
|
||||
// client.DefaultRetryer
|
||||
// }
|
||||
//
|
||||
// // This implementation always has 100 max retries
|
||||
// func (d retryer) MaxRetries() int { return 100 }
|
||||
type DefaultRetryer struct {
|
||||
// Num max Retries is the number of max retries that will be performed.
|
||||
// By default, this is zero.
|
||||
NumMaxRetries int
|
||||
|
||||
// MinRetryDelay is the minimum retry delay after which retry will be performed.
|
||||
// If not set, the value is 0ns.
|
||||
MinRetryDelay time.Duration
|
||||
|
||||
// MinThrottleRetryDelay is the minimum retry delay when throttled.
|
||||
// If not set, the value is 0ns.
|
||||
MinThrottleDelay time.Duration
|
||||
|
||||
// MaxRetryDelay is the maximum retry delay before which retry must be performed.
|
||||
// If not set, the value is 0ns.
|
||||
MaxRetryDelay time.Duration
|
||||
|
||||
// MaxThrottleDelay is the maximum retry delay when throttled.
|
||||
// If not set, the value is 0ns.
|
||||
MaxThrottleDelay time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultRetryerMaxNumRetries sets maximum number of retries
|
||||
DefaultRetryerMaxNumRetries = 3
|
||||
|
||||
// DefaultRetryerMinRetryDelay sets minimum retry delay
|
||||
DefaultRetryerMinRetryDelay = 30 * time.Millisecond
|
||||
|
||||
// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
|
||||
DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
|
||||
|
||||
// DefaultRetryerMaxRetryDelay sets maximum retry delay
|
||||
DefaultRetryerMaxRetryDelay = 300 * time.Second
|
||||
|
||||
// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
|
||||
DefaultRetryerMaxThrottleDelay = 300 * time.Second
|
||||
)
|
||||
|
||||
// MaxRetries returns the number of maximum returns the service will use to make
|
||||
// an individual API request.
|
||||
func (d DefaultRetryer) MaxRetries() int {
|
||||
return d.NumMaxRetries
|
||||
}
|
||||
|
||||
// setRetryerDefaults sets the default values of the retryer if not set
|
||||
func (d *DefaultRetryer) setRetryerDefaults() {
|
||||
if d.MinRetryDelay == 0 {
|
||||
d.MinRetryDelay = DefaultRetryerMinRetryDelay
|
||||
}
|
||||
if d.MaxRetryDelay == 0 {
|
||||
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
|
||||
}
|
||||
if d.MinThrottleDelay == 0 {
|
||||
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
|
||||
}
|
||||
if d.MaxThrottleDelay == 0 {
|
||||
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
|
||||
}
|
||||
}
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again
|
||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||
// Set the upper limit of delay in retrying at ~five minutes
|
||||
minTime := 30
|
||||
|
||||
// if number of max retries is zero, no retries will be performed.
|
||||
if d.NumMaxRetries == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Sets default value for retryer members
|
||||
d.setRetryerDefaults()
|
||||
|
||||
// minDelay is the minimum retryer delay
|
||||
minDelay := d.MinRetryDelay
|
||||
|
||||
var initialDelay time.Duration
|
||||
|
||||
isThrottle := r.IsErrorThrottle()
|
||||
if isThrottle {
|
||||
if delay, ok := getRetryDelay(r); ok {
|
||||
return delay
|
||||
if delay, ok := getRetryAfterDelay(r); ok {
|
||||
initialDelay = delay
|
||||
}
|
||||
|
||||
minTime = 500
|
||||
minDelay = d.MinThrottleDelay
|
||||
}
|
||||
|
||||
retryCount := r.RetryCount
|
||||
if isThrottle && retryCount > 8 {
|
||||
retryCount = 8
|
||||
} else if retryCount > 13 {
|
||||
retryCount = 13
|
||||
|
||||
// maxDelay the maximum retryer delay
|
||||
maxDelay := d.MaxRetryDelay
|
||||
|
||||
if isThrottle {
|
||||
maxDelay = d.MaxThrottleDelay
|
||||
}
|
||||
|
||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
var delay time.Duration
|
||||
|
||||
// Logic to cap the retry count based on the minDelay provided
|
||||
actualRetryCount := int(math.Log2(float64(minDelay))) + 1
|
||||
if actualRetryCount < 63-retryCount {
|
||||
delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
|
||||
if delay > maxDelay {
|
||||
delay = getJitterDelay(maxDelay / 2)
|
||||
}
|
||||
} else {
|
||||
delay = getJitterDelay(maxDelay / 2)
|
||||
}
|
||||
return delay + initialDelay
|
||||
}
|
||||
|
||||
// getJitterDelay returns a jittered delay for retry
|
||||
func getJitterDelay(duration time.Duration) time.Duration {
|
||||
return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
|
||||
}
|
||||
|
||||
// ShouldRetry returns true if the request should be retried.
|
||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||
|
||||
// ShouldRetry returns false if number of max retries is 0.
|
||||
if d.NumMaxRetries == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable != nil {
|
||||
return *r.Retryable
|
||||
}
|
||||
|
||||
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
|
||||
return true
|
||||
}
|
||||
|
||||
return r.IsErrorRetryable() || r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
// This will look in the Retry-After header, RFC 7231, for how long
|
||||
// it will wait before attempting another request
|
||||
func getRetryDelay(r *request.Request) (time.Duration, bool) {
|
||||
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
|
||||
if !canUseRetryAfterHeader(r) {
|
||||
return 0, false
|
||||
}
|
||||
|
|
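Usage note (not part of this commit): the DefaultRetryer fields introduced above (NumMaxRetries plus the min/max retry and throttle delays) can be set directly when building a config. A minimal sketch, assuming the vendored aws-sdk-go v1 session and client packages; the helper name is illustrative only:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // newSessionWithTunedRetries shows the new delay fields being set explicitly
    // instead of falling back to the DefaultRetryer* constants defined above.
    func newSessionWithTunedRetries() *session.Session {
        return session.Must(session.NewSession(&aws.Config{
            Retryer: client.DefaultRetryer{
                NumMaxRetries:    5,
                MinRetryDelay:    50 * time.Millisecond,
                MinThrottleDelay: 500 * time.Millisecond,
                MaxRetryDelay:    2 * time.Minute,
                MaxThrottleDelay: 2 * time.Minute,
            },
        }))
    }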
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go (generated, vendored; 1 line changed)

@@ -5,6 +5,7 @@ type ClientInfo struct {
    ServiceName string
    ServiceID string
    APIVersion string
    PartitionID string
    Endpoint string
    SigningName string
    SigningRegion string
vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go (generated, vendored, new file; 28 lines changed)

@@ -0,0 +1,28 @@
package client

import (
    "time"

    "github.com/aws/aws-sdk-go/aws/request"
)

// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}

// MaxRetries returns the number of maximum returns the service will use to make
// an individual API; For NoOpRetryer the MaxRetries will always be zero.
func (d NoOpRetryer) MaxRetries() int {
    return 0
}

// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
    return false
}

// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
    return 0
}
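Usage note (not part of this commit): the exported NoOpRetryer above is the new way to disable retries for all clients built from a session. A minimal sketch, assuming the vendored aws-sdk-go v1 packages; the function name is illustrative only:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    // newNoRetrySession builds a session whose clients never retry;
    // NoOpRetryer reports zero max retries and a zero delay, as defined above.
    func newNoRetrySession() *session.Session {
        return session.Must(session.NewSession(&aws.Config{
            Retryer: client.NoOpRetryer{},
        }))
    }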
vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored; 14 lines changed)
@ -246,6 +246,9 @@ type Config struct {
|
|||
// Disabling this feature is useful when you want to use local endpoints
|
||||
// for testing that do not support the modeled host prefix pattern.
|
||||
DisableEndpointHostPrefix *bool
|
||||
|
||||
// STSRegionalEndpoint will enable regional or legacy endpoint resolving
|
||||
STSRegionalEndpoint endpoints.STSRegionalEndpoint
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder
|
||||
|
@ -420,6 +423,13 @@ func (c *Config) MergeIn(cfgs ...*Config) {
|
|||
}
|
||||
}
|
||||
|
||||
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
|
||||
// when resolving the endpoint for a service
|
||||
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
|
||||
c.STSRegionalEndpoint = sre
|
||||
return c
|
||||
}
|
||||
|
||||
func mergeInConfig(dst *Config, other *Config) {
|
||||
if other == nil {
|
||||
return
|
||||
|
@ -520,6 +530,10 @@ func mergeInConfig(dst *Config, other *Config) {
|
|||
if other.DisableEndpointHostPrefix != nil {
|
||||
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
|
||||
}
|
||||
|
||||
if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
|
||||
dst.STSRegionalEndpoint = other.STSRegionalEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object. If any additional
|
||||
|
|
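Usage note (not part of this commit): the new STSRegionalEndpoint field and WithSTSRegionalEndpoint setter shown above let a single Config opt into regional STS endpoints. A minimal sketch using the vendored aws-sdk-go v1 API:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    // regionalSTSConfig opts this config into regional STS endpoint resolution.
    func regionalSTSConfig() *aws.Config {
        return aws.NewConfig().
            WithRegion("eu-west-1").
            WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)
    }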
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (generated, vendored; 531 lines changed)
@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Uint returns a pointer to the uint value passed in.
|
||||
func Uint(v uint) *uint {
|
||||
return &v
|
||||
}
|
||||
|
||||
// UintValue returns the value of the uint pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func UintValue(v *uint) uint {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// UintSlice converts a slice of uint values uinto a slice of
|
||||
// uint pointers
|
||||
func UintSlice(src []uint) []*uint {
|
||||
dst := make([]*uint, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintValueSlice converts a slice of uint pointers uinto a slice of
|
||||
// uint values
|
||||
func UintValueSlice(src []*uint) []uint {
|
||||
dst := make([]uint, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintMap converts a string map of uint values uinto a string
|
||||
// map of uint pointers
|
||||
func UintMap(src map[string]uint) map[string]*uint {
|
||||
dst := make(map[string]*uint)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UintValueMap converts a string map of uint pointers uinto a string
|
||||
// map of uint values
|
||||
func UintValueMap(src map[string]*uint) map[string]uint {
|
||||
dst := make(map[string]uint)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int8 returns a pointer to the int8 value passed in.
|
||||
func Int8(v int8) *int8 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int8Value returns the value of the int8 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int8Value(v *int8) int8 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int8Slice converts a slice of int8 values into a slice of
|
||||
// int8 pointers
|
||||
func Int8Slice(src []int8) []*int8 {
|
||||
dst := make([]*int8, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int8ValueSlice converts a slice of int8 pointers into a slice of
|
||||
// int8 values
|
||||
func Int8ValueSlice(src []*int8) []int8 {
|
||||
dst := make([]int8, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int8Map converts a string map of int8 values into a string
|
||||
// map of int8 pointers
|
||||
func Int8Map(src map[string]int8) map[string]*int8 {
|
||||
dst := make(map[string]*int8)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int8ValueMap converts a string map of int8 pointers into a string
|
||||
// map of int8 values
|
||||
func Int8ValueMap(src map[string]*int8) map[string]int8 {
|
||||
dst := make(map[string]int8)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int16 returns a pointer to the int16 value passed in.
|
||||
func Int16(v int16) *int16 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int16Value returns the value of the int16 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int16Value(v *int16) int16 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int16Slice converts a slice of int16 values into a slice of
|
||||
// int16 pointers
|
||||
func Int16Slice(src []int16) []*int16 {
|
||||
dst := make([]*int16, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int16ValueSlice converts a slice of int16 pointers into a slice of
|
||||
// int16 values
|
||||
func Int16ValueSlice(src []*int16) []int16 {
|
||||
dst := make([]int16, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int16Map converts a string map of int16 values into a string
|
||||
// map of int16 pointers
|
||||
func Int16Map(src map[string]int16) map[string]*int16 {
|
||||
dst := make(map[string]*int16)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int16ValueMap converts a string map of int16 pointers into a string
|
||||
// map of int16 values
|
||||
func Int16ValueMap(src map[string]*int16) map[string]int16 {
|
||||
dst := make(map[string]int16)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32 returns a pointer to the int32 value passed in.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32Value returns the value of the int32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int32Value(v *int32) int32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int32Slice converts a slice of int32 values into a slice of
|
||||
// int32 pointers
|
||||
func Int32Slice(src []int32) []*int32 {
|
||||
dst := make([]*int32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32ValueSlice converts a slice of int32 pointers into a slice of
|
||||
// int32 values
|
||||
func Int32ValueSlice(src []*int32) []int32 {
|
||||
dst := make([]int32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32Map converts a string map of int32 values into a string
|
||||
// map of int32 pointers
|
||||
func Int32Map(src map[string]int32) map[string]*int32 {
|
||||
dst := make(map[string]*int32)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int32ValueMap converts a string map of int32 pointers into a string
|
||||
// map of int32 values
|
||||
func Int32ValueMap(src map[string]*int32) map[string]int32 {
|
||||
dst := make(map[string]int32)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
|
@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
|||
return dst
|
||||
}
|
||||
|
||||
// Uint8 returns a pointer to the uint8 value passed in.
|
||||
func Uint8(v uint8) *uint8 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint8Value returns the value of the uint8 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint8Value(v *uint8) uint8 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint8Slice converts a slice of uint8 values into a slice of
|
||||
// uint8 pointers
|
||||
func Uint8Slice(src []uint8) []*uint8 {
|
||||
dst := make([]*uint8, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
|
||||
// uint8 values
|
||||
func Uint8ValueSlice(src []*uint8) []uint8 {
|
||||
dst := make([]uint8, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint8Map converts a string map of uint8 values into a string
|
||||
// map of uint8 pointers
|
||||
func Uint8Map(src map[string]uint8) map[string]*uint8 {
|
||||
dst := make(map[string]*uint8)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint8ValueMap converts a string map of uint8 pointers into a string
|
||||
// map of uint8 values
|
||||
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
|
||||
dst := make(map[string]uint8)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16 returns a pointer to the uint16 value passed in.
|
||||
func Uint16(v uint16) *uint16 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint16Value returns the value of the uint16 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint16Value(v *uint16) uint16 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint16Slice converts a slice of uint16 values into a slice of
|
||||
// uint16 pointers
|
||||
func Uint16Slice(src []uint16) []*uint16 {
|
||||
dst := make([]*uint16, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
|
||||
// uint16 values
|
||||
func Uint16ValueSlice(src []*uint16) []uint16 {
|
||||
dst := make([]uint16, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16Map converts a string map of uint16 values into a string
|
||||
// map of uint16 pointers
|
||||
func Uint16Map(src map[string]uint16) map[string]*uint16 {
|
||||
dst := make(map[string]*uint16)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint16ValueMap converts a string map of uint16 pointers into a string
|
||||
// map of uint16 values
|
||||
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
|
||||
dst := make(map[string]uint16)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32 returns a pointer to the uint32 value passed in.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32Value returns the value of the uint32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint32Value(v *uint32) uint32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint32Slice converts a slice of uint32 values into a slice of
|
||||
// uint32 pointers
|
||||
func Uint32Slice(src []uint32) []*uint32 {
|
||||
dst := make([]*uint32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
|
||||
// uint32 values
|
||||
func Uint32ValueSlice(src []*uint32) []uint32 {
|
||||
dst := make([]uint32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32Map converts a string map of uint32 values into a string
|
||||
// map of uint32 pointers
|
||||
func Uint32Map(src map[string]uint32) map[string]*uint32 {
|
||||
dst := make(map[string]*uint32)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint32ValueMap converts a string map of uint32 pointers into a string
|
||||
// map of uint32 values
|
||||
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
|
||||
dst := make(map[string]uint32)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64 returns a pointer to the uint64 value passed in.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64Value returns the value of the uint64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Uint64Value(v *uint64) uint64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Uint64Slice converts a slice of uint64 values into a slice of
|
||||
// uint64 pointers
|
||||
func Uint64Slice(src []uint64) []*uint64 {
|
||||
dst := make([]*uint64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
|
||||
// uint64 values
|
||||
func Uint64ValueSlice(src []*uint64) []uint64 {
|
||||
dst := make([]uint64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64Map converts a string map of uint64 values into a string
|
||||
// map of uint64 pointers
|
||||
func Uint64Map(src map[string]uint64) map[string]*uint64 {
|
||||
dst := make(map[string]*uint64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Uint64ValueMap converts a string map of uint64 pointers into a string
|
||||
// map of uint64 values
|
||||
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
|
||||
dst := make(map[string]uint64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32 returns a pointer to the float32 value passed in.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32Value returns the value of the float32 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float32Value(v *float32) float32 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float32Slice converts a slice of float32 values into a slice of
|
||||
// float32 pointers
|
||||
func Float32Slice(src []float32) []*float32 {
|
||||
dst := make([]*float32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32ValueSlice converts a slice of float32 pointers into a slice of
|
||||
// float32 values
|
||||
func Float32ValueSlice(src []*float32) []float32 {
|
||||
dst := make([]float32, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32Map converts a string map of float32 values into a string
|
||||
// map of float32 pointers
|
||||
func Float32Map(src map[string]float32) map[string]*float32 {
|
||||
dst := make(map[string]*float32)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float32ValueMap converts a string map of float32 pointers into a string
|
||||
// map of float32 values
|
||||
func Float32ValueMap(src map[string]*float32) map[string]float32 {
|
||||
dst := make(map[string]float32)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
|
|
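Usage note (not part of this commit): the helpers added above mirror the existing Int/Int64 ones for more numeric types, pairing a value-to-pointer constructor with a nil-safe dereference. A short sketch using functions defined in this file:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        p := aws.Uint(42)                    // *uint
        fmt.Println(aws.UintValue(p))        // 42
        fmt.Println(aws.UintValue(nil) == 0) // true: nil pointers read as the zero value

        eight := aws.Int8(8)
        fmt.Println(aws.Int8Value(eight)) // 8
    }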
vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go (generated, vendored; 11 lines changed)

@@ -16,25 +16,26 @@ var (
type metricChan struct {
    ch chan metric
    paused int64
    paused *int64
}

func newMetricChan(size int) metricChan {
    return metricChan{
        ch: make(chan metric, size),
        ch: make(chan metric, size),
        paused: new(int64),
    }
}

func (ch *metricChan) Pause() {
    atomic.StoreInt64(&ch.paused, pausedEnum)
    atomic.StoreInt64(ch.paused, pausedEnum)
}

func (ch *metricChan) Continue() {
    atomic.StoreInt64(&ch.paused, runningEnum)
    atomic.StoreInt64(ch.paused, runningEnum)
}

func (ch *metricChan) IsPaused() bool {
    v := atomic.LoadInt64(&ch.paused)
    v := atomic.LoadInt64(ch.paused)
    return v == pausedEnum
}
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go (generated, vendored; 1 line changed)

@@ -66,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
        XAmzRequestID: aws.String(r.RequestID),

        AttemptCount: aws.Int(r.RetryCount + 1),
        AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
        AccessKey: aws.String(creds.AccessKeyID),
    }
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (generated, vendored; 29 lines changed)

@@ -152,18 +152,19 @@ type EC2IAMInfo struct {
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
    DevpayProductCodes []string `json:"devpayProductCodes"`
    AvailabilityZone string `json:"availabilityZone"`
    PrivateIP string `json:"privateIp"`
    Version string `json:"version"`
    Region string `json:"region"`
    InstanceID string `json:"instanceId"`
    BillingProducts []string `json:"billingProducts"`
    InstanceType string `json:"instanceType"`
    AccountID string `json:"accountId"`
    PendingTime time.Time `json:"pendingTime"`
    ImageID string `json:"imageId"`
    KernelID string `json:"kernelId"`
    RamdiskID string `json:"ramdiskId"`
    Architecture string `json:"architecture"`
    DevpayProductCodes []string `json:"devpayProductCodes"`
    MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
    AvailabilityZone string `json:"availabilityZone"`
    PrivateIP string `json:"privateIp"`
    Version string `json:"version"`
    Region string `json:"region"`
    InstanceID string `json:"instanceId"`
    BillingProducts []string `json:"billingProducts"`
    InstanceType string `json:"instanceType"`
    AccountID string `json:"accountId"`
    PendingTime time.Time `json:"pendingTime"`
    ImageID string `json:"imageId"`
    KernelID string `json:"kernelId"`
    RamdiskID string `json:"ramdiskId"`
    Architecture string `json:"architecture"`
}
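Usage note (not part of this commit): the struct above gains a MarketplaceProductCodes field. A minimal sketch of reading the identity document on an EC2 instance, assuming the vendored ec2metadata package's New and GetInstanceIdentityDocument API:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        doc, err := ec2metadata.New(sess).GetInstanceIdentityDocument()
        if err != nil {
            panic(err)
        }
        // MarketplaceProductCodes is the field added in this change.
        fmt.Println(doc.InstanceID, doc.Region, doc.MarketplaceProductCodes)
    }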
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored; 1212 lines changed)

(File diff suppressed because it is too large.)
vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go (generated, vendored; 49 lines changed)
@ -3,6 +3,7 @@ package endpoints
|
|||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
@ -46,6 +47,43 @@ type Options struct {
|
|||
//
|
||||
// This option is ignored if StrictMatching is enabled.
|
||||
ResolveUnknownService bool
|
||||
|
||||
// STS Regional Endpoint flag helps with resolving the STS endpoint
|
||||
STSRegionalEndpoint STSRegionalEndpoint
|
||||
}
|
||||
|
||||
// STSRegionalEndpoint is an enum type alias for int
|
||||
// It is used internally by the core sdk as STS Regional Endpoint flag value
|
||||
type STSRegionalEndpoint int
|
||||
|
||||
const (
|
||||
|
||||
// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
|
||||
UnsetSTSEndpoint STSRegionalEndpoint = iota
|
||||
|
||||
// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
|
||||
// to use legacy endpoints.
|
||||
LegacySTSEndpoint
|
||||
|
||||
// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
|
||||
// to use regional endpoints.
|
||||
RegionalSTSEndpoint
|
||||
)
|
||||
|
||||
// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based
|
||||
// on the input string provided in env config or shared config by the user.
|
||||
//
|
||||
// `legacy`, `regional` are the only case-insensitive valid strings for
|
||||
// resolving the STS regional Endpoint flag.
|
||||
func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
|
||||
switch {
|
||||
case strings.EqualFold(s, "legacy"):
|
||||
return LegacySTSEndpoint, nil
|
||||
case strings.EqualFold(s, "regional"):
|
||||
return RegionalSTSEndpoint, nil
|
||||
default:
|
||||
return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
|
||||
}
|
||||
}
|
||||
|
||||
// Set combines all of the option functions together.
|
||||
|
@ -79,6 +117,12 @@ func ResolveUnknownServiceOption(o *Options) {
|
|||
o.ResolveUnknownService = true
|
||||
}
|
||||
|
||||
// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
|
||||
// STS endpoint to their regional endpoint, instead of the global endpoint.
|
||||
func STSRegionalEndpointOption(o *Options) {
|
||||
o.STSRegionalEndpoint = RegionalSTSEndpoint
|
||||
}
|
||||
|
||||
// A Resolver provides the interface for functionality to resolve endpoints.
|
||||
// The build in Partition and DefaultResolver return value satisfy this interface.
|
||||
type Resolver interface {
|
||||
|
@ -194,7 +238,7 @@ func (p Partition) ID() string { return p.id }
|
|||
// require the provided service and region to be known by the partition.
|
||||
// If the endpoint cannot be strictly resolved an error will be returned. This
|
||||
// mode is useful to ensure the endpoint resolved is valid. Without
|
||||
// StrictMatching enabled the endpoint returned my look valid but may not work.
|
||||
// StrictMatching enabled the endpoint returned may look valid but may not work.
|
||||
// StrictMatching requires the SDK to be updated if you want to take advantage
|
||||
// of new regions and services expansions.
|
||||
//
|
||||
|
@ -350,6 +394,9 @@ type ResolvedEndpoint struct {
|
|||
// The endpoint URL
|
||||
URL string
|
||||
|
||||
// The endpoint partition
|
||||
PartitionID string
|
||||
|
||||
// The region that should be used for signing requests.
|
||||
SigningRegion string
|
||||
|
||||
|
|
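Usage note (not part of this commit): GetSTSRegionalEndpoint, defined above, is how the session package turns the user-provided string into the flag value. A short sketch using only functions and constants from this file:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // "regional" and "legacy" are the only accepted values (case-insensitive);
        // anything else returns UnsetSTSEndpoint and an error.
        sre, err := endpoints.GetSTSRegionalEndpoint("regional")
        if err != nil {
            panic(err)
        }
        fmt.Println(sre == endpoints.RegionalSTSEndpoint) // true
    }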
vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go (generated, vendored, new file; 19 lines changed)

@@ -0,0 +1,19 @@
package endpoints

var stsLegacyGlobalRegions = map[string]struct{}{
    "ap-northeast-1": {},
    "ap-south-1": {},
    "ap-southeast-1": {},
    "ap-southeast-2": {},
    "ca-central-1": {},
    "eu-central-1": {},
    "eu-north-1": {},
    "eu-west-1": {},
    "eu-west-2": {},
    "eu-west-3": {},
    "sa-east-1": {},
    "us-east-1": {},
    "us-east-2": {},
    "us-west-1": {},
    "us-west-2": {},
}
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go (generated, vendored; 40 lines changed)
@ -75,24 +75,55 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool)
|
|||
return p.RegionRegex.MatchString(region)
|
||||
}
|
||||
|
||||
func allowLegacyEmptyRegion(service string) bool {
|
||||
legacy := map[string]struct{}{
|
||||
"budgets": {},
|
||||
"ce": {},
|
||||
"chime": {},
|
||||
"cloudfront": {},
|
||||
"ec2metadata": {},
|
||||
"iam": {},
|
||||
"importexport": {},
|
||||
"organizations": {},
|
||||
"route53": {},
|
||||
"sts": {},
|
||||
"support": {},
|
||||
"waf": {},
|
||||
}
|
||||
|
||||
_, allowed := legacy[service]
|
||||
return allowed
|
||||
}
|
||||
|
||||
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
|
||||
var opt Options
|
||||
opt.Set(opts...)
|
||||
|
||||
s, hasService := p.Services[service]
|
||||
if !(hasService || opt.ResolveUnknownService) {
|
||||
if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
|
||||
// Only return error if the resolver will not fallback to creating
|
||||
// endpoint based on service endpoint ID passed in.
|
||||
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
|
||||
}
|
||||
|
||||
if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
|
||||
region = s.PartitionEndpoint
|
||||
}
|
||||
|
||||
if service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint {
|
||||
if _, ok := stsLegacyGlobalRegions[region]; ok {
|
||||
region = "aws-global"
|
||||
}
|
||||
}
|
||||
|
||||
e, hasEndpoint := s.endpointForRegion(region)
|
||||
if !hasEndpoint && opt.StrictMatching {
|
||||
if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) {
|
||||
return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
|
||||
}
|
||||
|
||||
defs := []endpoint{p.Defaults, s.Defaults}
|
||||
return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
|
||||
|
||||
return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
|
||||
}
|
||||
|
||||
func serviceList(ss services) []string {
|
||||
|
@ -201,7 +232,7 @@ func getByPriority(s []string, p []string, def string) string {
|
|||
return s[0]
|
||||
}
|
||||
|
||||
func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
|
||||
func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
|
||||
var merged endpoint
|
||||
for _, def := range defs {
|
||||
merged.mergeIn(def)
|
||||
|
@ -237,6 +268,7 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
|
|||
|
||||
return ResolvedEndpoint{
|
||||
URL: u,
|
||||
PartitionID: partitionID,
|
||||
SigningRegion: signingRegion,
|
||||
SigningName: signingName,
|
||||
SigningNameDerived: signingNameDerived,
|
||||
|
|
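Usage note (not part of this commit): the resolver logic above folds the regions in stsLegacyGlobalRegions into the global STS endpoint unless the regional flag is set. A sketch of the expected difference, assuming the vendored endpoints package; the printed URLs are the expected outcomes, not output captured from this build:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        r := endpoints.DefaultResolver()

        // Legacy behavior: a region listed in stsLegacyGlobalRegions is rewritten
        // to the aws-global STS endpoint.
        legacy, _ := r.EndpointFor("sts", "eu-west-1")

        // With the new option the per-region endpoint is kept.
        regional, _ := r.EndpointFor("sts", "eu-west-1", endpoints.STSRegionalEndpointOption)

        fmt.Println(legacy.URL)   // expected: https://sts.amazonaws.com
        fmt.Println(regional.URL) // expected: https://sts.eu-west-1.amazonaws.com
    }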
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go (generated, vendored; 4 lines changed)

@@ -23,7 +23,7 @@ type Handlers struct {
    Complete HandlerList
}

// Copy returns of this handler's lists.
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
    return Handlers{
        Validate: h.Validate.copy(),

@@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers {
    }
}

// Clear removes callback functions for all handlers
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
    h.Validate.Clear()
    h.Build.Clear()
vendor/github.com/aws/aws-sdk-go/aws/request/request.go (generated, vendored; 12 lines changed)

@@ -99,8 +99,12 @@ type Operation struct {
    BeforePresignFn func(r *Request) error
}

// New returns a new Request pointer for the service API
// operation and parameters.
// New returns a new Request pointer for the service API operation and
// parameters.
//
// A Retryer should be provided to direct how the request is retried. If
// Retryer is nil, a default no retry value will be used. You can use
// NoOpRetryer in the Client package to disable retry behavior directly.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response

@@ -108,6 +112,10 @@ type Operation struct {
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
    retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {

    if retryer == nil {
        retryer = noOpRetryer{}
    }

    method := operation.HTTPMethod
    if method == "" {
        method = "POST"
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go (generated, vendored; 55 lines changed)
@ -35,10 +35,41 @@ type Retryer interface {
|
|||
}
|
||||
|
||||
// WithRetryer sets a Retryer value to the given Config returning the Config
|
||||
// value for chaining.
|
||||
// value for chaining. The value must not be nil.
|
||||
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
||||
if retryer == nil {
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
|
||||
}
|
||||
retryer = noOpRetryer{}
|
||||
}
|
||||
cfg.Retryer = retryer
|
||||
return cfg
|
||||
|
||||
}
|
||||
|
||||
// noOpRetryer is a internal no op retryer used when a request is created
|
||||
// without a retryer.
|
||||
//
|
||||
// Provides a retryer that performs no retries.
|
||||
// It should be used when we do not want retries to be performed.
|
||||
type noOpRetryer struct{}
|
||||
|
||||
// MaxRetries returns the number of maximum returns the service will use to make
|
||||
// an individual API; For NoOpRetryer the MaxRetries will always be zero.
|
||||
func (d noOpRetryer) MaxRetries() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
|
||||
func (d noOpRetryer) ShouldRetry(_ *Request) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again;
|
||||
// since NoOpRetryer does not retry, RetryRules always returns 0.
|
||||
func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
// retryableCodes is a collection of service response codes which are retry-able
|
||||
|
@ -94,10 +125,6 @@ var validParentCodes = map[string]struct{}{
|
|||
ErrCodeRead: {},
|
||||
}
|
||||
|
||||
type temporaryError interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
func isNestedErrorRetryable(parentErr awserr.Error) bool {
|
||||
if parentErr == nil {
|
||||
return false
|
||||
|
@ -116,7 +143,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
|
|||
return isCodeRetryable(aerr.Code())
|
||||
}
|
||||
|
||||
if t, ok := err.(temporaryError); ok {
|
||||
if t, ok := err.(temporary); ok {
|
||||
return t.Temporary() || isErrConnectionReset(err)
|
||||
}
|
||||
|
||||
|
@ -223,6 +250,16 @@ func (r *Request) IsErrorRetryable() bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// HTTP response status code 501 should not be retried.
|
||||
// 501 represents Not Implemented which means the request method is not
|
||||
// supported by the server and cannot be handled.
|
||||
if r.HTTPResponse != nil {
|
||||
// HTTP response status code 500 represents internal server error and
|
||||
// should be retried without any throttle.
|
||||
if r.HTTPResponse.StatusCode == 500 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return IsErrorRetryable(r.Error)
|
||||
}
|
||||
|
||||
|
@ -237,7 +274,11 @@ func (r *Request) IsErrorThrottle() bool {
|
|||
|
||||
if r.HTTPResponse != nil {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429, 502, 503, 504:
|
||||
case
|
||||
429, // error caused due to too many requests
|
||||
502, // Bad Gateway error should be throttled
|
||||
503, // caused when service is unavailable
|
||||
504: // error occurred due to gateway timeout
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
|
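Usage note (not part of this commit): WithRetryer, shown above, now guards against a nil argument instead of leaving Config.Retryer nil. A short sketch of both cases, assuming the vendored request and client packages:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    func configs() (*aws.Config, *aws.Config) {
        // Normal use: attach a concrete retryer to a config.
        withRetries := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{NumMaxRetries: 2})

        // After this change a nil retryer is replaced with the internal no-op
        // retryer, and an error is logged if the config has a logger set.
        noRetries := request.WithRetryer(aws.NewConfig(), nil)

        return withRetries, noRetries
    }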
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go (generated, vendored; 32 lines changed)
@ -1,12 +1,14 @@
|
|||
package session
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/defaults"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
)
|
||||
|
||||
// EnvProviderName provides a name of the provider when config is loaded from environment.
|
||||
|
@ -125,6 +127,12 @@ type envConfig struct {
|
|||
//
|
||||
// AWS_ROLE_SESSION_NAME=session_name
|
||||
RoleSessionName string
|
||||
|
||||
// Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service
|
||||
//
|
||||
// AWS_STS_REGIONAL_ENDPOINTS =sts_regional_endpoint
|
||||
// This can take value as `regional` or `legacy`
|
||||
STSRegionalEndpoint endpoints.STSRegionalEndpoint
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -179,6 +187,9 @@ var (
|
|||
roleSessionNameEnvKey = []string{
|
||||
"AWS_ROLE_SESSION_NAME",
|
||||
}
|
||||
stsRegionalEndpointKey = []string{
|
||||
"AWS_STS_REGIONAL_ENDPOINTS",
|
||||
}
|
||||
)
|
||||
|
||||
// loadEnvConfig retrieves the SDK's environment configuration.
|
||||
|
@ -187,7 +198,7 @@ var (
|
|||
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
|
||||
// the shared SDK config will be loaded in addition to the SDK's specific
|
||||
// configuration values.
|
||||
func loadEnvConfig() envConfig {
|
||||
func loadEnvConfig() (envConfig, error) {
|
||||
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
|
||||
return envConfigLoad(enableSharedConfig)
|
||||
}
|
||||
|
@ -198,11 +209,11 @@ func loadEnvConfig() envConfig {
|
|||
// Loads the shared configuration in addition to the SDK's specific configuration.
|
||||
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
|
||||
// environment variable is set.
|
||||
func loadSharedEnvConfig() envConfig {
|
||||
func loadSharedEnvConfig() (envConfig, error) {
|
||||
return envConfigLoad(true)
|
||||
}
|
||||
|
||||
func envConfigLoad(enableSharedConfig bool) envConfig {
|
||||
func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
|
||||
cfg := envConfig{}
|
||||
|
||||
cfg.EnableSharedConfig = enableSharedConfig
|
||||
|
@ -264,12 +275,23 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
|
|||
|
||||
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
|
||||
|
||||
return cfg
|
||||
// STS Regional Endpoint variable
|
||||
for _, k := range stsRegionalEndpointKey {
|
||||
if v := os.Getenv(k); len(v) != 0 {
|
||||
STSRegionalEndpoint, err := endpoints.GetSTSRegionalEndpoint(v)
|
||||
if err != nil {
|
||||
return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
|
||||
}
|
||||
cfg.STSRegionalEndpoint = STSRegionalEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func setFromEnvVal(dst *string, keys []string) {
|
||||
for _, k := range keys {
|
||||
if v := os.Getenv(k); len(v) > 0 {
|
||||
if v := os.Getenv(k); len(v) != 0 {
|
||||
*dst = v
|
||||
break
|
||||
}
|
||||
|
|
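Usage note (not part of this commit): the AWS_STS_REGIONAL_ENDPOINTS key parsed above drives the same flag from the environment; values other than "regional" or "legacy" make the env config load fail. A minimal sketch using the vendored session package:

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // The new environment key handled by envConfigLoad above.
        os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")

        sess, err := session.NewSession()
        if err != nil {
            panic(err)
        }
        _ = sess // STS clients built from sess resolve regional endpoints.
    }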
vendor/github.com/aws/aws-sdk-go/aws/session/session.go (generated, vendored; 135 lines changed)
@ -73,7 +73,7 @@ type Session struct {
|
|||
// func is called instead of waiting to receive an error until a request is made.
|
||||
func New(cfgs ...*aws.Config) *Session {
|
||||
// load initial config from environment
|
||||
envCfg := loadEnvConfig()
|
||||
envCfg, envErr := loadEnvConfig()
|
||||
|
||||
if envCfg.EnableSharedConfig {
|
||||
var cfg aws.Config
|
||||
|
@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session {
|
|||
// Session creation failed, need to report the error and prevent
|
||||
// any requests from succeeding.
|
||||
s = &Session{Config: defaults.Config()}
|
||||
s.Config.MergeIn(cfgs...)
|
||||
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
|
||||
s.Handlers.Validate.PushBack(func(r *request.Request) {
|
||||
r.Error = err
|
||||
})
|
||||
s.logDeprecatedNewSessionError(msg, err, cfgs)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
s := deprecatedNewSession(cfgs...)
|
||||
if envErr != nil {
|
||||
msg := "failed to load env config"
|
||||
s.logDeprecatedNewSessionError(msg, envErr, cfgs)
|
||||
}
|
||||
|
||||
if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
|
||||
if l := s.Config.Logger; l != nil {
|
||||
|
@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session {
|
|||
} else if csmCfg.Enabled {
|
||||
err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to enable CSM, %v", err)
|
||||
s.Config.Logger.Log("ERROR:", err.Error())
|
||||
s.Handlers.Validate.PushBack(func(r *request.Request) {
|
||||
r.Error = err
|
||||
})
|
||||
msg := "failed to enable CSM"
|
||||
s.logDeprecatedNewSessionError(msg, err, cfgs)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -136,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session {
|
|||
// to be built with retrieving credentials with AssumeRole set in the config.
|
||||
//
|
||||
// See the NewSessionWithOptions func for information on how to override or
|
||||
// control through code how the Session will be created. Such as specifying the
|
||||
// control through code how the Session will be created, such as specifying the
|
||||
// config profile, and controlling if shared config is enabled or not.
|
||||
func NewSession(cfgs ...*aws.Config) (*Session, error) {
|
||||
opts := Options{}
|
||||
|
@ -279,10 +276,17 @@ type Options struct {
|
|||
// }))
|
||||
func NewSessionWithOptions(opts Options) (*Session, error) {
|
||||
var envCfg envConfig
|
||||
var err error
|
||||
if opts.SharedConfigState == SharedConfigEnable {
|
||||
envCfg = loadSharedEnvConfig()
|
||||
envCfg, err = loadSharedEnvConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load shared config, %v", err)
|
||||
}
|
||||
} else {
|
||||
envCfg = loadEnvConfig()
|
||||
envCfg, err = loadEnvConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load environment config, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(opts.Profile) != 0 {
|
||||
|
@ -550,6 +554,9 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
|
|||
}
|
||||
}
|
||||
|
||||
// Regional Endpoint flag for STS endpoint resolving
|
||||
mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg)
|
||||
|
||||
// Configure credentials if not already set by the user when creating the
|
||||
// Session.
|
||||
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
|
||||
|
@ -563,6 +570,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
|
|||
return nil
|
||||
}
|
||||
|
||||
// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from
|
||||
// envConfig and SharedConfig with envConfig being given precedence over SharedConfig
|
||||
func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
|
||||
|
||||
cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint
|
||||
|
||||
if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
|
||||
cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint
|
||||
}
|
||||
|
||||
if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
|
||||
cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initHandlers(s *Session) {
|
||||
// Add the Validate parameter handler if it is not disabled.
|
||||
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
|
||||
|
@ -571,7 +594,7 @@ func initHandlers(s *Session) {
|
|||
}
|
||||
}
|
||||
|
||||
// Copy creates and returns a copy of the current Session, coping the config
|
||||
// Copy creates and returns a copy of the current Session, copying the config
|
||||
// and handlers. If any additional configs are provided they will be merged
|
||||
// on top of the Session's copied config.
|
||||
//
|
||||
|
@ -591,37 +614,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
|||
// ClientConfig satisfies the client.ConfigProvider interface and is used to
|
||||
// configure the service client instances. Passing the Session to the service
|
||||
// client's constructor (New) will use this method to configure the client.
|
||||
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
|
||||
// Backwards compatibility, the error will be eaten if user calls ClientConfig
|
||||
// directly. All SDK services will use ClientconfigWithError.
|
||||
cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
|
||||
func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
|
||||
s = s.Copy(cfgs...)
|
||||
|
||||
var resolved endpoints.ResolvedEndpoint
|
||||
var err error
|
||||
|
||||
region := aws.StringValue(s.Config.Region)
|
||||
|
||||
if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
|
||||
resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
|
||||
resolved.SigningRegion = region
|
||||
} else {
|
||||
resolved, err = s.Config.EndpointResolver.EndpointFor(
|
||||
serviceName, region,
|
||||
func(opt *endpoints.Options) {
|
||||
opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
|
||||
opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
|
||||
|
||||
// Support the condition where the service is modeled but its
|
||||
// endpoint metadata is not available.
|
||||
opt.ResolveUnknownService = true
|
||||
},
|
||||
)
|
||||
resolved, err := s.resolveEndpoint(service, region, s.Config)
|
||||
if err != nil && s.Config.Logger != nil {
|
||||
s.Config.Logger.Log(fmt.Sprintf(
|
||||
"ERROR: unable to resolve endpoint for service %q, region %q, err: %v",
|
||||
service, region, err))
|
||||
}
|
||||
|
||||
return client.Config{
|
||||
|
@ -631,7 +632,37 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
|
|||
SigningRegion: resolved.SigningRegion,
|
||||
SigningNameDerived: resolved.SigningNameDerived,
|
||||
SigningName: resolved.SigningName,
|
||||
}, err
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
|
||||
|
||||
if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
|
||||
return endpoints.ResolvedEndpoint{
|
||||
URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
|
||||
SigningRegion: region,
|
||||
}, nil
|
||||
}
|
||||
|
||||
resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
|
||||
func(opt *endpoints.Options) {
|
||||
opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
|
||||
opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
|
||||
// Support for STSRegionalEndpoint where the STSRegionalEndpoint is
|
||||
// provided in envConfig or sharedConfig with envConfig getting
|
||||
// precedence.
|
||||
opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
|
||||
|
||||
// Support the condition where the service is modeled but its
|
||||
// endpoint metadata is not available.
|
||||
opt.ResolveUnknownService = true
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return endpoints.ResolvedEndpoint{}, err
|
||||
}
|
||||
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
|
||||
|
@ -641,12 +672,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
|
|||
s = s.Copy(cfgs...)
|
||||
|
||||
var resolved endpoints.ResolvedEndpoint
|
||||
|
||||
region := aws.StringValue(s.Config.Region)
|
||||
|
||||
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
|
||||
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
|
||||
resolved.SigningRegion = region
|
||||
resolved.SigningRegion = aws.StringValue(s.Config.Region)
|
||||
}
|
||||
|
||||
return client.Config{
|
||||
|
@ -658,3 +686,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
|
|||
SigningName: resolved.SigningName,
|
||||
}
|
||||
}
|
||||
|
||||
// logDeprecatedNewSessionError function enables error handling for session
|
||||
func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
|
||||
// Session creation failed, need to report the error and prevent
|
||||
// any requests from succeeding.
|
||||
s.Config.MergeIn(cfgs...)
|
||||
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
|
||||
s.Handlers.Validate.PushBack(func(r *request.Request) {
|
||||
r.Error = err
|
||||
})
|
||||
}
|
||||
|
|
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go (generated, vendored; 21 lines changed)
@ -5,6 +5,7 @@ import (
|
|||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/internal/ini"
|
||||
)
|
||||
|
||||
|
@ -40,6 +41,9 @@ const (
|
|||
// Web Identity Token File
|
||||
webIdentityTokenFileKey = `web_identity_token_file` // optional
|
||||
|
||||
// Additional config fields for regional or legacy endpoints
|
||||
stsRegionalEndpointSharedKey = `sts_regional_endpoints`
|
||||
|
||||
// DefaultSharedConfigProfile is the default profile to be used when
|
||||
// loading configuration from the config files if another profile name
|
||||
// is not provided.
|
||||
|
@ -82,12 +86,17 @@ type sharedConfig struct {
|
|||
//
|
||||
// endpoint_discovery_enabled = true
|
||||
EnableEndpointDiscovery *bool
|
||||
|
||||
// CSM Options
|
||||
CSMEnabled *bool
|
||||
CSMHost string
|
||||
CSMPort string
|
||||
CSMClientID string
|
||||
|
||||
// Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service
|
||||
//
|
||||
// sts_regional_endpoints = sts_regional_endpoint
|
||||
// This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint`
|
||||
STSRegionalEndpoint endpoints.STSRegionalEndpoint
|
||||
}
|
||||
|
||||
type sharedConfigFile struct {
|
||||
|
@ -244,8 +253,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
|
||||
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
|
||||
updateString(&cfg.CredentialSource, section, credentialSourceKey)
|
||||
|
||||
updateString(&cfg.Region, section, regionKey)
|
||||
|
||||
if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
|
||||
sre, err := endpoints.GetSTSRegionalEndpoint(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load %s from shared config, %s, %v",
|
||||
stsRegionalEndpointKey, file.Filename, err)
|
||||
}
|
||||
cfg.STSRegionalEndpoint = sre
|
||||
}
|
||||
}
|
||||
|
||||
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
|
||||
|
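A small sketch of how the sts_regional_endpoints value read above is converted; it assumes the documented "regional"/"legacy" strings and is not part of the diff itself.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// "regional" / "legacy" are the values this option is documented to accept;
	// anything else would take the error path seen in setFromIniFile above.
	sre, err := endpoints.GetSTSRegionalEndpoint("regional")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sre == endpoints.RegionalSTSEndpoint) // true
}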
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.23.8"
|
||||
const SDKVersion = "1.25.31"
|
||||
|
|
11
vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
11
vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
|
@ -162,7 +162,7 @@ loop:
|
|||
if len(tokens) == 0 {
|
||||
break loop
|
||||
}
|
||||
|
||||
// if should skip is true, we skip the tokens until should skip is set to false.
|
||||
step = SkipTokenState
|
||||
}
|
||||
|
||||
|
@ -218,7 +218,7 @@ loop:
|
|||
// S -> equal_expr' expr_stmt'
|
||||
switch k.Kind {
|
||||
case ASTKindEqualExpr:
|
||||
// assiging a value to some key
|
||||
// assigning a value to some key
|
||||
k.AppendChild(newExpression(tok))
|
||||
stack.Push(newExprStatement(k))
|
||||
case ASTKindExpr:
|
||||
|
@ -250,6 +250,13 @@ loop:
|
|||
if !runeCompare(tok.Raw(), openBrace) {
|
||||
return nil, NewParseError("expected '['")
|
||||
}
|
||||
// If OpenScopeState is not at the start, we must mark the previous ast as complete
|
||||
//
|
||||
// for example: if previous ast was a skip statement;
|
||||
// we should mark it as complete before we create a new statement
|
||||
if k.Kind != ASTKindStart {
|
||||
stack.MarkComplete(k)
|
||||
}
|
||||
|
||||
stmt := newStatement()
|
||||
stack.Push(stmt)
|
||||
|
|
6
vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
generated
vendored
|
@ -22,24 +22,24 @@ func newSkipper() skipper {
|
|||
}
|
||||
|
||||
func (s *skipper) ShouldSkip(tok Token) bool {
|
||||
// should skip state will be modified only if previous token was new line (NL);
|
||||
// and the current token is not WhiteSpace (WS).
|
||||
if s.shouldSkip &&
|
||||
s.prevTok.Type() == TokenNL &&
|
||||
tok.Type() != TokenWS {
|
||||
|
||||
s.Continue()
|
||||
return false
|
||||
}
|
||||
s.prevTok = tok
|
||||
|
||||
return s.shouldSkip
|
||||
}
|
||||
|
||||
func (s *skipper) Skip() {
|
||||
s.shouldSkip = true
|
||||
s.prevTok = emptyToken
|
||||
}
|
||||
|
||||
func (s *skipper) Continue() {
|
||||
s.shouldSkip = false
|
||||
// empty token is assigned as we return to default state, when should skip is false
|
||||
s.prevTok = emptyToken
|
||||
}
|
||||
|
|
12
vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
generated
vendored
Normal file
12
vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
package sdkio

const (
	// Byte is 8 bits
	Byte int64 = 1
	// KibiByte (KiB) is 1024 Bytes
	KibiByte = Byte * 1024
	// MebiByte (MiB) is 1024 KiB
	MebiByte = KibiByte * 1024
	// GibiByte (GiB) is 1024 MiB
	GibiByte = MebiByte * 1024
)
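As a rough illustration of how these constants read at call sites inside the SDK (internal packages cannot be imported from outside it); the package name and sizes below are made up for the example.

// Hypothetical SDK-internal snippet; sizes are illustrative.
package s3manager

import "github.com/aws/aws-sdk-go/internal/sdkio"

// Five mebibytes expressed with the unit constants rather than raw numbers.
const examplePartSize = 5 * sdkio.MebiByte

func newScratchBuffer() []byte {
	return make([]byte, 64*sdkio.KibiByte) // 64 KiB
}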
15
vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
generated
vendored
Normal file
15
vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// +build go1.10

package sdkmath

import "math"

// Round returns the nearest integer, rounding half away from zero.
//
// Special cases are:
//	Round(±0) = ±0
//	Round(±Inf) = ±Inf
//	Round(NaN) = NaN
func Round(x float64) float64 {
	return math.Round(x)
}
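A quick behavioral sketch: both build variants of sdkmath.Round implement round-half-away-from-zero, which on Go 1.10+ is simply math.Round; the sample values are illustrative.

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println(math.Round(0.5))  // 1
	fmt.Println(math.Round(-0.5)) // -1
	fmt.Println(math.Round(2.4))  // 2
	fmt.Println(math.Round(-2.6)) // -3
}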
56
vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
generated
vendored
Normal file
56
vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
generated
vendored
Normal file
|
@ -0,0 +1,56 @@
|
|||
// +build !go1.10
|
||||
|
||||
package sdkmath
|
||||
|
||||
import "math"
|
||||
|
||||
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
// Go versions prior to Go 1.10.
|
||||
const (
|
||||
uvone = 0x3FF0000000000000
|
||||
mask = 0x7FF
|
||||
shift = 64 - 11 - 1
|
||||
bias = 1023
|
||||
signMask = 1 << 63
|
||||
fracMask = 1<<shift - 1
|
||||
)
|
||||
|
||||
// Round returns the nearest integer, rounding half away from zero.
|
||||
//
|
||||
// Special cases are:
|
||||
// Round(±0) = ±0
|
||||
// Round(±Inf) = ±Inf
|
||||
// Round(NaN) = NaN
|
||||
//
|
||||
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
// Go versions prior to Go 1.10.
|
||||
func Round(x float64) float64 {
|
||||
// Round is a faster implementation of:
|
||||
//
|
||||
// func Round(x float64) float64 {
|
||||
// t := Trunc(x)
|
||||
// if Abs(x-t) >= 0.5 {
|
||||
// return t + Copysign(1, x)
|
||||
// }
|
||||
// return t
|
||||
// }
|
||||
bits := math.Float64bits(x)
|
||||
e := uint(bits>>shift) & mask
|
||||
if e < bias {
|
||||
// Round abs(x) < 1 including denormals.
|
||||
bits &= signMask // +-0
|
||||
if e == bias-1 {
|
||||
bits |= uvone // +-1
|
||||
}
|
||||
} else if e < bias+shift {
|
||||
// Round any abs(x) >= 1 containing a fractional component [0,1).
|
||||
//
|
||||
// Numbers with larger exponents are returned unchanged since they
|
||||
// must be either an integer, infinity, or NaN.
|
||||
const half = 1 << (shift - 1)
|
||||
e -= bias
|
||||
bits += half >> e
|
||||
bits &^= fracMask >> e
|
||||
}
|
||||
return math.Float64frombits(bits)
|
||||
}
|
11
vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
generated
vendored
Normal file
11
vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// +build go1.6

package sdkrand

import "math/rand"

// Read provides the stub for the math/rand Rand.Read method, supported in
// Go versions 1.6 and greater.
func Read(r *rand.Rand, p []byte) (int, error) {
	return r.Read(p)
}
24
vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
generated
vendored
Normal file
24
vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// +build !go1.6

package sdkrand

import "math/rand"

// Read backfills Go 1.6's math/rand Rand.Read for Go 1.5.
func Read(r *rand.Rand, p []byte) (n int, err error) {
	// Copy of the Go standard library's math/rand read function, which was
	// not added to the standard library until Go 1.6.
	var pos int8
	var val int64
	for n = 0; n < len(p); n++ {
		if pos == 0 {
			val = r.Int63()
			pos = 7
		}
		p[n] = byte(val)
		val >>= 8
		pos--
	}

	return n, err
}
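For orientation, the Go 1.6+ path above is simply math/rand's Rand.Read; the pre-1.6 backfill fills the slice the same way, eight bytes per Int63 call. A hedged sketch using the standard library directly (sdkrand itself is internal to the SDK):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1)) // fixed seed so the output is repeatable
	p := make([]byte, 8)
	n, err := r.Read(p) // what sdkrand.Read(r, p) resolves to on Go 1.6+
	fmt.Println(n, err, p)
}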
20
vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
generated
vendored
20
vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
generated
vendored
|
@ -1,8 +1,11 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/internal/sdkmath"
|
||||
)
|
||||
|
||||
// Names of time formats supported by the SDK
|
||||
|
@ -13,12 +16,19 @@ const (
|
|||
)
|
||||
|
||||
// Time formats supported by the SDK
|
||||
// Output time is intended to not contain decimals
|
||||
const (
|
||||
// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
|
||||
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// This format is used for output time without seconds precision
|
||||
RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
|
||||
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
|
||||
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
|
||||
|
||||
// This format is used for output time without seconds precision
|
||||
ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
|
||||
)
|
||||
|
||||
// IsKnownTimestampFormat returns if the timestamp format name
|
||||
|
@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string {
|
|||
|
||||
switch name {
|
||||
case RFC822TimeFormatName:
|
||||
return t.Format(RFC822TimeFormat)
|
||||
return t.Format(RFC822OutputTimeFormat)
|
||||
case ISO8601TimeFormatName:
|
||||
return t.Format(ISO8601TimeFormat)
|
||||
return t.Format(ISO8601OutputTimeFormat)
|
||||
case UnixTimeFormatName:
|
||||
return strconv.FormatInt(t.Unix(), 10)
|
||||
default:
|
||||
|
@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) {
|
|||
return time.Parse(ISO8601TimeFormat, value)
|
||||
case UnixTimeFormatName:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
_, dec := math.Modf(v)
|
||||
dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return time.Unix(int64(v), 0), nil
|
||||
return time.Unix(int64(v), int64(dec*(1e9))), nil
|
||||
default:
|
||||
panic("unknown timestamp format name, " + formatName)
|
||||
}
|
||||
|
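A short sketch of the effect of these changes, assuming the exported protocol.FormatTime/ParseTime helpers shown in the hunks above; the timestamp values are illustrative. Output formatting keeps second precision, while parsing now retains fractional seconds.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 123000000, time.UTC)

	// Output stays at second precision (ISO8601OutputTimeFormat).
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t))

	// Parsing now accepts fractional seconds for ISO8601 and unix timestamps.
	parsed, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38.123Z")
	fmt.Println(parsed, err)

	unix, err := protocol.ParseTime(protocol.UnixTimeFormatName, "1398796238.123")
	fmt.Println(unix.UnixNano(), err)
}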
|
32
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
generated
vendored
Normal file
32
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package xmlutil

import (
	"encoding/xml"
	"strings"
)

type xmlAttrSlice []xml.Attr

func (x xmlAttrSlice) Len() int {
	return len(x)
}

func (x xmlAttrSlice) Less(i, j int) bool {
	spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
	localI, localJ := x[i].Name.Local, x[j].Name.Local
	valueI, valueJ := x[i].Value, x[j].Value

	spaceCmp := strings.Compare(spaceI, spaceJ)
	localCmp := strings.Compare(localI, localJ)
	valueCmp := strings.Compare(valueI, valueJ)

	if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
		return true
	}

	return false
}

func (x xmlAttrSlice) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
}
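xmlAttrSlice is unexported, so as an illustration only, here is the same sort.Interface pattern applied to xml.Attr in user code (simplified to namespace and local name):

package main

import (
	"encoding/xml"
	"fmt"
	"sort"
)

// attrsByName mirrors the xmlAttrSlice idea: a sort.Interface over xml.Attr,
// here ordering by namespace then local name only (a simplified variant).
type attrsByName []xml.Attr

func (a attrsByName) Len() int      { return len(a) }
func (a attrsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a attrsByName) Less(i, j int) bool {
	if a[i].Name.Space != a[j].Name.Space {
		return a[i].Name.Space < a[j].Name.Space
	}
	return a[i].Name.Local < a[j].Name.Local
}

func main() {
	attrs := []xml.Attr{
		{Name: xml.Name{Local: "b"}, Value: "2"},
		{Name: xml.Name{Local: "a"}, Value: "1"},
	}
	sort.Sort(attrsByName(attrs))
	fmt.Println(attrs)
}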
13
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
generated
vendored
13
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
generated
vendored
|
@ -119,7 +119,18 @@ func (n *XMLNode) findElem(name string) (string, bool) {
|
|||
|
||||
// StructToXML writes an XMLNode to a xml.Encoder as tokens.
|
||||
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
|
||||
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
|
||||
// Sort Attributes
|
||||
attrs := node.Attr
|
||||
if sorted {
|
||||
sortedAttrs := make([]xml.Attr, len(attrs))
|
||||
for _, k := range node.Attr {
|
||||
sortedAttrs = append(sortedAttrs, k)
|
||||
}
|
||||
sort.Sort(xmlAttrSlice(sortedAttrs))
|
||||
attrs = sortedAttrs
|
||||
}
|
||||
|
||||
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
|
||||
|
||||
if node.Text != "" {
|
||||
e.EncodeToken(xml.CharData([]byte(node.Text)))
|
||||
|
|
224
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
224
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
generated
vendored
|
@ -7043,7 +7043,7 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
|
|||
}
|
||||
|
||||
type AbortMultipartUploadInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
|
||||
|
||||
// Name of the bucket to which the multipart upload was initiated.
|
||||
//
|
||||
|
@ -8084,7 +8084,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
|
|||
}
|
||||
|
||||
type CompleteMultipartUploadInput struct {
|
||||
_ struct{} `type:"structure" payload:"MultipartUpload"`
|
||||
_ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -8404,7 +8404,7 @@ func (s *ContinuationEvent) UnmarshalEvent(
|
|||
}
|
||||
|
||||
type CopyObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
|
||||
|
||||
// The canned ACL to apply to the object.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||
|
@ -9025,7 +9025,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke
|
|||
}
|
||||
|
||||
type CreateBucketInput struct {
|
||||
_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
|
||||
_ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
|
||||
|
||||
// The canned ACL to apply to the bucket.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
|
||||
|
@ -9166,7 +9166,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
|
|||
}
|
||||
|
||||
type CreateMultipartUploadInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
|
||||
|
||||
// The canned ACL to apply to the object.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||
|
@ -9708,7 +9708,7 @@ func (s *Delete) SetQuiet(v bool) *Delete {
|
|||
}
|
||||
|
||||
type DeleteBucketAnalyticsConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket from which an analytics configuration is deleted.
|
||||
//
|
||||
|
@ -9784,7 +9784,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketCorsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -9844,7 +9844,7 @@ func (s DeleteBucketCorsOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketEncryptionInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the server-side encryption configuration
|
||||
// to delete.
|
||||
|
@ -9907,7 +9907,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -9953,7 +9953,7 @@ func (s *DeleteBucketInput) getBucket() (v string) {
|
|||
}
|
||||
|
||||
type DeleteBucketInventoryConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the inventory configuration to delete.
|
||||
//
|
||||
|
@ -10029,7 +10029,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketLifecycleInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10089,7 +10089,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketMetricsConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the metrics configuration to delete.
|
||||
//
|
||||
|
@ -10179,7 +10179,7 @@ func (s DeleteBucketOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketPolicyInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10239,7 +10239,7 @@ func (s DeleteBucketPolicyOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketReplicationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"`
|
||||
|
||||
// The bucket name.
|
||||
//
|
||||
|
@ -10304,7 +10304,7 @@ func (s DeleteBucketReplicationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketTaggingInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10364,7 +10364,7 @@ func (s DeleteBucketTaggingOutput) GoString() string {
|
|||
}
|
||||
|
||||
type DeleteBucketWebsiteInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10510,7 +10510,7 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
|
|||
}
|
||||
|
||||
type DeleteObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10656,7 +10656,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
|
|||
}
|
||||
|
||||
type DeleteObjectTaggingInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10749,7 +10749,7 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO
|
|||
}
|
||||
|
||||
type DeleteObjectsInput struct {
|
||||
_ struct{} `type:"structure" payload:"Delete"`
|
||||
_ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -10885,7 +10885,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
|
|||
}
|
||||
|
||||
type DeletePublicAccessBlockInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`
|
||||
|
||||
// The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
|
||||
//
|
||||
|
@ -11341,7 +11341,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule {
|
|||
}
|
||||
|
||||
type GetBucketAccelerateConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"`
|
||||
|
||||
// Name of the bucket for which the accelerate configuration is retrieved.
|
||||
//
|
||||
|
@ -11412,7 +11412,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA
|
|||
}
|
||||
|
||||
type GetBucketAclInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketAclRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -11489,7 +11489,7 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
|
|||
}
|
||||
|
||||
type GetBucketAnalyticsConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket from which an analytics configuration is retrieved.
|
||||
//
|
||||
|
@ -11574,7 +11574,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana
|
|||
}
|
||||
|
||||
type GetBucketCorsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -11642,7 +11642,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
|
|||
}
|
||||
|
||||
type GetBucketEncryptionInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket from which the server-side encryption configuration
|
||||
// is retrieved.
|
||||
|
@ -11714,7 +11714,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv
|
|||
}
|
||||
|
||||
type GetBucketInventoryConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the inventory configuration to retrieve.
|
||||
//
|
||||
|
@ -11799,7 +11799,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
|
|||
}
|
||||
|
||||
type GetBucketLifecycleConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -11867,7 +11867,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
|
|||
}
|
||||
|
||||
type GetBucketLifecycleInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -11935,7 +11935,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
|
|||
}
|
||||
|
||||
type GetBucketLocationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12003,7 +12003,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
|
|||
}
|
||||
|
||||
type GetBucketLoggingInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12075,7 +12075,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket
|
|||
}
|
||||
|
||||
type GetBucketMetricsConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the metrics configuration to retrieve.
|
||||
//
|
||||
|
@ -12160,7 +12160,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
|
|||
}
|
||||
|
||||
type GetBucketNotificationConfigurationRequest struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
|
||||
|
||||
// Name of the bucket to get the notification configuration for.
|
||||
//
|
||||
|
@ -12208,7 +12208,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
|
|||
}
|
||||
|
||||
type GetBucketPolicyInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12277,7 +12277,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
|
|||
}
|
||||
|
||||
type GetBucketPolicyStatusInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"`
|
||||
|
||||
// The name of the Amazon S3 bucket whose policy status you want to retrieve.
|
||||
//
|
||||
|
@ -12348,7 +12348,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke
|
|||
}
|
||||
|
||||
type GetBucketReplicationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12418,7 +12418,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC
|
|||
}
|
||||
|
||||
type GetBucketRequestPaymentInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12487,7 +12487,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym
|
|||
}
|
||||
|
||||
type GetBucketTaggingInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12556,7 +12556,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
|
|||
}
|
||||
|
||||
type GetBucketVersioningInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12636,7 +12636,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp
|
|||
}
|
||||
|
||||
type GetBucketWebsiteInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12730,7 +12730,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb
|
|||
}
|
||||
|
||||
type GetObjectAclInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectAclRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -12853,7 +12853,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
|
|||
}
|
||||
|
||||
type GetObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -13090,7 +13090,7 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
|
|||
}
|
||||
|
||||
type GetObjectLegalHoldInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`
|
||||
|
||||
// The bucket containing the object whose Legal Hold status you want to retrieve.
|
||||
//
|
||||
|
@ -13199,7 +13199,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
|
|||
}
|
||||
|
||||
type GetObjectLockConfigurationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`
|
||||
|
||||
// The bucket whose object lock configuration you want to retrieve.
|
||||
//
|
||||
|
@ -13581,7 +13581,7 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput
|
|||
}
|
||||
|
||||
type GetObjectRetentionInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`
|
||||
|
||||
// The bucket containing the object whose retention settings you want to retrieve.
|
||||
//
|
||||
|
@ -13690,7 +13690,7 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje
|
|||
}
|
||||
|
||||
type GetObjectTaggingInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -13790,7 +13790,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput
|
|||
}
|
||||
|
||||
type GetObjectTorrentInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -13895,7 +13895,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu
|
|||
}
|
||||
|
||||
type GetPublicAccessBlockInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`
|
||||
|
||||
// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
|
||||
// want to retrieve.
|
||||
|
@ -14126,7 +14126,7 @@ func (s *Grantee) SetURI(v string) *Grantee {
|
|||
}
|
||||
|
||||
type HeadBucketInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"HeadBucketRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -14186,7 +14186,7 @@ func (s HeadBucketOutput) GoString() string {
|
|||
}
|
||||
|
||||
type HeadObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"HeadObjectRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -15661,7 +15661,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
|
|||
}
|
||||
|
||||
type ListBucketAnalyticsConfigurationsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket from which analytics configurations are retrieved.
|
||||
//
|
||||
|
@ -15773,7 +15773,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str
|
|||
}
|
||||
|
||||
type ListBucketInventoryConfigurationsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the inventory configurations to retrieve.
|
||||
//
|
||||
|
@ -15887,7 +15887,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str
|
|||
}
|
||||
|
||||
type ListBucketMetricsConfigurationsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"`
|
||||
|
||||
// The name of the bucket containing the metrics configurations to retrieve.
|
||||
//
|
||||
|
@ -16047,7 +16047,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
|
|||
}
|
||||
|
||||
type ListMultipartUploadsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -16291,7 +16291,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
|
|||
}
|
||||
|
||||
type ListObjectVersionsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -16524,7 +16524,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe
|
|||
}
|
||||
|
||||
type ListObjectsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListObjectsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -16736,7 +16736,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
|
|||
}
|
||||
|
||||
type ListObjectsV2Input struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
|
||||
|
||||
// Name of the bucket to list.
|
||||
//
|
||||
|
@ -16997,7 +16997,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
|
|||
}
|
||||
|
||||
type ListPartsInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ListPartsRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -18622,7 +18622,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi
|
|||
}
|
||||
|
||||
type PutBucketAccelerateConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
|
||||
|
||||
// Specifies the Accelerate Configuration you want to set for the bucket.
|
||||
//
|
||||
|
@ -18698,7 +18698,7 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketAclInput struct {
|
||||
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
|
||||
_ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`
|
||||
|
||||
// The canned ACL to apply to the bucket.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
|
||||
|
@ -18827,7 +18827,7 @@ func (s PutBucketAclOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketAnalyticsConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"`
|
||||
|
||||
// The configuration and any analyses for the analytics filter.
|
||||
//
|
||||
|
@ -18922,7 +18922,7 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketCorsInput struct {
|
||||
_ struct{} `type:"structure" payload:"CORSConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19004,7 +19004,7 @@ func (s PutBucketCorsOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketEncryptionInput struct {
|
||||
_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"`
|
||||
|
||||
// Specifies default encryption for a bucket using server-side encryption with
|
||||
// Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
|
||||
|
@ -19089,7 +19089,7 @@ func (s PutBucketEncryptionOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketInventoryConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"InventoryConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"`
|
||||
|
||||
// The name of the bucket where the inventory configuration will be stored.
|
||||
//
|
||||
|
@ -19184,7 +19184,7 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketLifecycleConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19260,7 +19260,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketLifecycleInput struct {
|
||||
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19333,7 +19333,7 @@ func (s PutBucketLifecycleOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketLoggingInput struct {
|
||||
_ struct{} `type:"structure" payload:"BucketLoggingStatus"`
|
||||
_ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19410,7 +19410,7 @@ func (s PutBucketLoggingOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketMetricsConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"MetricsConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"`
|
||||
|
||||
// The name of the bucket for which the metrics configuration is set.
|
||||
//
|
||||
|
@ -19505,7 +19505,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketNotificationConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19585,7 +19585,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketNotificationInput struct {
|
||||
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19657,7 +19657,7 @@ func (s PutBucketNotificationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketPolicyInput struct {
|
||||
_ struct{} `type:"structure" payload:"Policy"`
|
||||
_ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19741,7 +19741,7 @@ func (s PutBucketPolicyOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketReplicationInput struct {
|
||||
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19830,7 +19830,7 @@ func (s PutBucketReplicationOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketRequestPaymentInput struct {
|
||||
_ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19907,7 +19907,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketTaggingInput struct {
|
||||
_ struct{} `type:"structure" payload:"Tagging"`
|
||||
_ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -19984,7 +19984,7 @@ func (s PutBucketTaggingOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketVersioningInput struct {
|
||||
_ struct{} `type:"structure" payload:"VersioningConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -20070,7 +20070,7 @@ func (s PutBucketVersioningOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutBucketWebsiteInput struct {
|
||||
_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
|
||||
_ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -20149,7 +20149,7 @@ func (s PutBucketWebsiteOutput) GoString() string {
|
|||
}
|
||||
|
||||
type PutObjectAclInput struct {
|
||||
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
|
||||
_ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
|
||||
|
||||
// The canned ACL to apply to the object.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||
|
@ -20324,7 +20324,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
|
|||
}
|
||||
|
||||
type PutObjectInput struct {
|
||||
_ struct{} `type:"structure" payload:"Body"`
|
||||
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
|
||||
|
||||
// The canned ACL to apply to the object.
|
||||
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
|
||||
|
@ -20671,7 +20671,7 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
|
|||
}
|
||||
|
||||
type PutObjectLegalHoldInput struct {
|
||||
_ struct{} `type:"structure" payload:"LegalHold"`
|
||||
_ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
|
||||
|
||||
// The bucket containing the object that you want to place a Legal Hold on.
|
||||
//
|
||||
|
@ -20791,7 +20791,7 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
|
|||
}
|
||||
|
||||
type PutObjectLockConfigurationInput struct {
|
||||
_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
|
||||
_ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`
|
||||
|
||||
// The bucket whose object lock configuration you want to create or replace.
|
||||
//
|
||||
|
@ -20998,7 +20998,7 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
|
|||
}
|
||||
|
||||
type PutObjectRetentionInput struct {
|
||||
_ struct{} `type:"structure" payload:"Retention"`
|
||||
_ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`
|
||||
|
||||
// The bucket that contains the object you want to apply this Object Retention
|
||||
// configuration to.
|
||||
|
@ -21129,7 +21129,7 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti
|
|||
}
|
||||
|
||||
type PutObjectTaggingInput struct {
|
||||
_ struct{} `type:"structure" payload:"Tagging"`
|
||||
_ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -21237,7 +21237,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput
|
|||
}
|
||||
|
||||
type PutPublicAccessBlockInput struct {
|
||||
_ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
|
||||
_ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
|
||||
|
||||
// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
|
||||
// want to set.
|
||||
|
@ -21999,7 +21999,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
|
|||
}
|
||||
|
||||
type RestoreObjectInput struct {
|
||||
_ struct{} `type:"structure" payload:"RestoreRequest"`
|
||||
_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -22464,6 +22464,41 @@ func (s SSES3) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
type ScanRange struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies the end of the byte range. This parameter is optional. Valid values:
|
||||
// non-negative integers. The default value is one less than the size of the
|
||||
// object being queried.
|
||||
End *int64 `type:"long"`
|
||||
|
||||
// Specifies the start of the byte range. This parameter is optional. Valid
|
||||
// values: non-negative integers. The default value is 0.
|
||||
Start *int64 `type:"long"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ScanRange) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ScanRange) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetEnd sets the End field's value.
|
||||
func (s *ScanRange) SetEnd(v int64) *ScanRange {
|
||||
s.End = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetStart sets the Start field's value.
|
||||
func (s *ScanRange) SetStart(v int64) *ScanRange {
|
||||
s.Start = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SelectObjectContentEventStream provides handling of EventStreams for
|
||||
// the SelectObjectContent API.
|
||||
//
|
||||
|
@ -22503,6 +22538,8 @@ type SelectObjectContentEventStream struct {
|
|||
// may result in resource leaks.
|
||||
func (es *SelectObjectContentEventStream) Close() (err error) {
|
||||
es.Reader.Close()
|
||||
es.StreamCloser.Close()
|
||||
|
||||
return es.Err()
|
||||
}
|
||||
|
||||
|
@ -22512,8 +22549,6 @@ func (es *SelectObjectContentEventStream) Err() error {
|
|||
if err := es.Reader.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
es.StreamCloser.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -22738,6 +22773,12 @@ type SelectObjectContentInput struct {
|
|||
// The SSE Customer Key MD5. For more information, see Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
|
||||
// Specifies the byte range of the object to get the records from. A record
|
||||
// is processed when its first byte is contained by the range. This parameter
|
||||
// is optional, but when specified, it must not be empty. See RFC 2616, Section
|
||||
// 14.35.1 about how to specify the start and end of the range.
|
||||
ScanRange *ScanRange `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -22858,6 +22899,12 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC
|
|||
return s
|
||||
}
|
||||
|
||||
// SetScanRange sets the ScanRange field's value.
|
||||
func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput {
|
||||
s.ScanRange = v
|
||||
return s
|
||||
}
|
||||
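A hedged sketch of wiring the new ScanRange into a SelectObjectContentInput; the bucket, key, and expression are placeholders and the serialization settings are kept minimal.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.SelectObjectContentInput{
		Bucket:              aws.String("example-bucket"),         // placeholder
		Key:                 aws.String("example.csv"),            // placeholder
		Expression:          aws.String("SELECT * FROM S3Object"), // placeholder query
		ExpressionType:      aws.String(s3.ExpressionTypeSql),
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		// Only records whose first byte falls in the first MiB are processed.
		ScanRange: (&s3.ScanRange{}).SetStart(0).SetEnd(1024*1024 - 1),
	}
	fmt.Println(input.ScanRange)
}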
|
||||
type SelectObjectContentOutput struct {
|
||||
_ struct{} `type:"structure" payload:"Payload"`
|
||||
|
||||
|
@ -23715,7 +23762,7 @@ func (s *Transition) SetStorageClass(v string) *Transition {
|
|||
}
|
||||
|
||||
type UploadPartCopyInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
|
||||
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
|
@ -24045,7 +24092,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy
|
|||
}
|
||||
|
||||
type UploadPartInput struct {
|
||||
_ struct{} `type:"structure" payload:"Body"`
|
||||
_ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"`
|
||||
|
||||
// Object data.
|
||||
Body io.ReadSeeker `type:"blob"`
|
||||
|
@ -24657,6 +24704,9 @@ const (
|
|||
|
||||
// InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value
|
||||
InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus"
|
||||
|
||||
// InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value
|
||||
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
81
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
generated
vendored
Normal file
81
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/buffered_read_seeker.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package s3manager
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
// BufferedReadSeeker is a buffered io.ReadSeeker
|
||||
type BufferedReadSeeker struct {
|
||||
r io.ReadSeeker
|
||||
buffer []byte
|
||||
readIdx, writeIdx int
|
||||
}
|
||||
|
||||
// NewBufferedReadSeeker returns a new BufferedReadSeeker
|
||||
// if len(b) == 0 then the buffer will be initialized to 64 KiB.
|
||||
func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
|
||||
if len(b) == 0 {
|
||||
b = make([]byte, 64*1024)
|
||||
}
|
||||
return &BufferedReadSeeker{r: r, buffer: b}
|
||||
}
|
||||
|
||||
func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
|
||||
b.r = r
|
||||
b.readIdx, b.writeIdx = 0, 0
|
||||
}
|
||||
|
||||
// Read will read up to len(p) bytes into p and will return
|
||||
// the number of bytes read and any error that occurred.
|
||||
// If the len(p) > the buffer size then a single read request
|
||||
// will be issued to the underlying io.ReadSeeker for len(p) bytes.
|
||||
// A Read request will at most perform a single Read to the underlying
|
||||
// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
|
||||
func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return n, err
|
||||
}
|
||||
|
||||
if b.readIdx == b.writeIdx {
|
||||
if len(p) >= len(b.buffer) {
|
||||
n, err = b.r.Read(p)
|
||||
return n, err
|
||||
}
|
||||
b.readIdx, b.writeIdx = 0, 0
|
||||
|
||||
n, err = b.r.Read(b.buffer)
|
||||
if n == 0 {
|
||||
return n, err
|
||||
}
|
||||
|
||||
b.writeIdx += n
|
||||
}
|
||||
|
||||
n = copy(p, b.buffer[b.readIdx:b.writeIdx])
|
||||
b.readIdx += n
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Seek will position the underlying io.ReadSeeker at the given offset
|
||||
// and will clear the buffer.
|
||||
func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
n, err := b.r.Seek(offset, whence)
|
||||
|
||||
b.reset(b.r)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadAt will read up to len(p) bytes at the given file offset.
|
||||
// This will result in the buffer being cleared.
|
||||
func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
|
||||
_, err := b.Seek(off, sdkio.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return b.Read(p)
|
||||
}
|
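A minimal usage sketch, assuming a bytes.Reader as the underlying io.ReadSeeker; passing a nil buffer picks up the 64 KiB default described above.

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	src := bytes.NewReader([]byte("hello, buffered reads"))

	// nil buffer: the 64 KiB default size is used.
	brs := s3manager.NewBufferedReadSeeker(src, nil)

	p := make([]byte, 5)
	n, err := brs.Read(p)
	fmt.Println(n, err, string(p[:n])) // 5 <nil> hello
}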
7
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
generated
vendored
Normal file
7
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
// +build !windows

package s3manager

func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
	return nil
}
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
generated
vendored
Normal file
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_read_seeker_write_to_windows.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
package s3manager

func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
	return NewBufferedReadSeekerWriteToPool(1024 * 1024)
}
7
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
generated
vendored
Normal file
7
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
// +build !windows

package s3manager

func defaultDownloadBufferProvider() WriterReadFromProvider {
	return nil
}
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
generated
vendored
Normal file
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/default_writer_read_from_windows.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
package s3manager

func defaultDownloadBufferProvider() WriterReadFromProvider {
	return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
}
90
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
generated
vendored
@ -25,13 +25,25 @@ const DefaultDownloadPartSize = 1024 * 1024 * 5
// when using Download().
const DefaultDownloadConcurrency = 5

type errReadingBody struct {
	err error
}

func (e *errReadingBody) Error() string {
	return fmt.Sprintf("failed to read part body: %v", e.err)
}

func (e *errReadingBody) Unwrap() error {
	return e.err
}

// The Downloader structure that calls Download(). It is safe to call Download()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Downloader's properties is not safe to be done concurrently.
type Downloader struct {
	// The buffer size (in bytes) to use when buffering data into chunks and
	// sending them as parts to S3. The minimum allowed part size is 5MB, and
	// if this value is set to zero, the DefaultDownloadPartSize value will be used.
	// The size (in bytes) to request from S3 for each part.
	// The minimum allowed part size is 5MB, and if this value is set to zero,
	// the DefaultDownloadPartSize value will be used.
	//
	// PartSize is ignored if the Range input parameter is provided.
	PartSize int64
@ -50,6 +62,14 @@ type Downloader struct {
	// List of request options that will be passed down to individual API
	// operation requests made by the downloader.
	RequestOptions []request.Option

	// Defines the buffer strategy used when downloading a part.
	//
	// If a WriterReadFromProvider is given, the Download manager
	// will pass the io.WriterAt of the Download request to the provider
	// and will use the returned WriterReadFrom from the provider as the
	// destination writer when copying from the http response body.
	BufferProvider WriterReadFromProvider
}

// WithDownloaderRequestOptions appends to the Downloader's API request options.
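BufferProvider is simply the WriterReadFromProvider interface added in writer_read_from.go later in this diff, so callers can plug in their own strategy. A minimal illustrative implementation (not part of the diff, no pooling) that wraps the destination in a bufio.Writer and flushes after each copy:

package main

import (
	"bufio"
	"io"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// flushingWriter gives a *bufio.Writer a ReadFrom that flushes once the
// response body has been fully copied, mirroring bufferedReadFrom below.
type flushingWriter struct{ *bufio.Writer }

func (w flushingWriter) ReadFrom(r io.Reader) (int64, error) {
	n, err := w.Writer.ReadFrom(r)
	if ferr := w.Flush(); err == nil {
		err = ferr
	}
	return n, err
}

// simpleProvider satisfies s3manager.WriterReadFromProvider without pooling.
type simpleProvider struct{}

func (simpleProvider) GetReadFrom(writer io.Writer) (s3manager.WriterReadFrom, func()) {
	return flushingWriter{bufio.NewWriter(writer)}, func() {}
}

func main() {
	// Attach the provider when configuring a Downloader.
	_ = s3manager.Downloader{BufferProvider: simpleProvider{}}
}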
@ -77,10 +97,15 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
	return newDownloader(s3.New(c), options...)
}

func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
	d := &Downloader{
		S3:          s3.New(c),
		PartSize:    DefaultDownloadPartSize,
		Concurrency: DefaultDownloadConcurrency,
		S3:             client,
		PartSize:       DefaultDownloadPartSize,
		Concurrency:    DefaultDownloadConcurrency,
		BufferProvider: defaultDownloadBufferProvider(),
	}
	for _, option := range options {
		option(d)
@ -109,16 +134,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
	d := &Downloader{
		S3:          svc,
		PartSize:    DefaultDownloadPartSize,
		Concurrency: DefaultDownloadConcurrency,
	}
	for _, option := range options {
		option(d)
	}

	return d
	return newDownloader(svc, options...)
}

type maxRetrier interface {
@ -405,18 +421,20 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
	var n int64
	var err error
	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
		var resp *s3.GetObjectOutput
		resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
		if err != nil {
			return err
		}
		d.setTotalBytes(resp) // Set total if not yet set.

		n, err = io.Copy(&chunk, resp.Body)
		resp.Body.Close()
		n, err = d.tryDownloadChunk(in, &chunk)
		if err == nil {
			break
		}
		// Check if the returned error is an errReadingBody.
		// If err is errReadingBody this indicates that an error
		// occurred while copying the http response body.
		// If this occurs we unwrap the err to set the underlying error
		// and attempt any remaining retries.
		if bodyErr, ok := err.(*errReadingBody); ok {
			err = bodyErr.Unwrap()
		} else {
			return err
		}

		chunk.cur = 0
		logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
@ -429,6 +447,28 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
	return err
}

func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
	cleanup := func() {}
	if d.cfg.BufferProvider != nil {
		w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
	}
	defer cleanup()

	resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
	if err != nil {
		return 0, err
	}
	d.setTotalBytes(resp) // Set total if not yet set.

	n, err := io.Copy(w, resp.Body)
	resp.Body.Close()
	if err != nil {
		return n, &errReadingBody{err: err}
	}

	return n, nil
}

func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
	s, ok := svc.(*s3.S3)
	if !ok {
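A rough back-of-the-envelope sketch (not from the diff) of how PartSize and Concurrency shape the request pattern for a download: with the 5 MiB default part size, a 100 MiB object becomes 20 ranged GETs, of which at most Concurrency are in flight at once.

package main

import "fmt"

func main() {
	const objectSize = 100 * 1024 * 1024 // hypothetical object size in bytes
	const partSize = 1024 * 1024 * 5     // DefaultDownloadPartSize
	const concurrency = 5                // DefaultDownloadConcurrency

	parts := (objectSize + partSize - 1) / partSize // ceiling division
	fmt.Println(parts, "ranged GETs,", concurrency, "at a time") // 20 ranged GETs, 5 at a time
}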
65
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/read_seeker_write_to.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
package s3manager

import (
	"io"
	"sync"
)

// ReadSeekerWriteTo defines an interface implementing io.WriteTo and io.ReadSeeker
type ReadSeekerWriteTo interface {
	io.ReadSeeker
	io.WriterTo
}

// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
// implementation.
type BufferedReadSeekerWriteTo struct {
	*BufferedReadSeeker
}

// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
// an error occurs. Returns the number of bytes written and any error encountered during the write.
func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
	return io.Copy(writer, b.BufferedReadSeeker)
}

// ReadSeekerWriteToProvider provides an implementation of io.WriteTo for an io.ReadSeeker
type ReadSeekerWriteToProvider interface {
	GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
}

// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
// []byte slices for buffering parts in memory
type BufferedReadSeekerWriteToPool struct {
	pool sync.Pool
}

// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
// a pool of reusable buffers. If size is less than 64 KiB then the buffer
// will default to 64 KiB. Reason: io.Copy from writers or readers that don't support io.WriteTo or io.ReadFrom
// respectively will default to copying 32 KiB.
func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
	if size < 65536 {
		size = 65536
	}

	return &BufferedReadSeekerWriteToPool{
		pool: sync.Pool{New: func() interface{} {
			return make([]byte, size)
		}},
	}
}

// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
// The provided cleanup must be called after operations have been completed on the
// returned io.ReadSeekerWriteTo in order to signal the return of resources to the pool.
func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
	buffer := p.pool.Get().([]byte)

	r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
	cleanup = func() {
		p.pool.Put(buffer)
	}

	return r, cleanup
}
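A small sketch (not part of the diff) of the pool in isolation: GetWriteTo borrows a []byte from the sync.Pool, wraps the seeker, and the returned cleanup hands the buffer back.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	pool := s3manager.NewBufferedReadSeekerWriteToPool(0) // sizes below 64 KiB are bumped to 64 KiB

	body := strings.NewReader("part payload")
	w, cleanup := pool.GetWriteTo(body)
	defer cleanup() // return the []byte to the pool when done

	n, err := w.WriteTo(os.Stdout)
	fmt.Println(n, err)
}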
177
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
@ -162,6 +162,12 @@ type Uploader struct {
	// List of request options that will be passed down to individual API
	// operation requests made by the uploader.
	RequestOptions []request.Option

	// Defines the buffer strategy used when uploading a part
	BufferProvider ReadSeekerWriteToProvider

	// partPool allows for the reuse of streaming payload part buffers between upload calls
	partPool *partPool
}

// NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@ -181,18 +187,25 @@ type Uploader struct {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
	return newUploader(s3.New(c), options...)
}

func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
	u := &Uploader{
		S3:                s3.New(c),
		S3:                client,
		PartSize:          DefaultUploadPartSize,
		Concurrency:       DefaultUploadConcurrency,
		LeavePartsOnError: false,
		MaxUploadParts:    MaxUploadParts,
		BufferProvider:    defaultUploadBufferProvider(),
	}

	for _, option := range options {
		option(u)
	}

	u.partPool = newPartPool(u.PartSize)

	return u
}

@ -215,19 +228,7 @@ func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
	u := &Uploader{
		S3:                svc,
		PartSize:          DefaultUploadPartSize,
		Concurrency:       DefaultUploadConcurrency,
		LeavePartsOnError: false,
		MaxUploadParts:    MaxUploadParts,
	}

	for _, option := range options {
		option(u)
	}

	return u
	return newUploader(svc, options...)
}

// Upload uploads an object to S3, intelligently buffering large files into
@ -287,6 +288,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
	for _, opt := range opts {
		opt(&i.cfg)
	}

	i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))

	return i.upload()
@ -356,8 +358,6 @@ type uploader struct {

	readerPos int64 // current reader position
	totalSize int64 // set to -1 if the size is not known

	bufferPool sync.Pool
}

// internal logic for deciding whether to upload a single part or use a
@ -373,15 +373,16 @@ func (u *uploader) upload() (*UploadOutput, error) {
	}

	// Do one read to determine if we have more than one part
	reader, _, part, err := u.nextReader()
	reader, _, cleanup, err := u.nextReader()
	if err == io.EOF { // single part
		return u.singlePart(reader)
		return u.singlePart(reader, cleanup)
	} else if err != nil {
		cleanup()
		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
	}

	mu := multiuploader{uploader: u}
	return mu.upload(reader, part)
	return mu.upload(reader, cleanup)
}

// init will initialize all default options.
@ -396,8 +397,10 @@ func (u *uploader) init() error {
		u.cfg.MaxUploadParts = MaxUploadParts
	}

	u.bufferPool = sync.Pool{
		New: func() interface{} { return make([]byte, u.cfg.PartSize) },
	// If PartSize was changed or partPool was never set up then we need to allocate a new pool
	// so that we return []byte slices of the correct size
	if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
		u.cfg.partPool = newPartPool(u.cfg.PartSize)
	}

	// Try to get the total size for some optimizations
@ -433,7 +436,7 @@ func (u *uploader) initSize() error {
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
@ -452,17 +455,32 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
			}
		}

		reader := io.NewSectionReader(r, u.readerPos, n)
		var (
			reader  io.ReadSeeker
			cleanup func()
		)

		reader = io.NewSectionReader(r, u.readerPos, n)
		if u.cfg.BufferProvider != nil {
			reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
		} else {
			cleanup = func() {}
		}

		u.readerPos += n

		return reader, int(n), nil, err
		return reader, int(n), cleanup, err

	default:
		part := u.bufferPool.Get().([]byte)
		part := u.cfg.partPool.Get().([]byte)
		n, err := readFillBuf(r, part)
		u.readerPos += int64(n)

		return bytes.NewReader(part[0:n]), n, part, err
		cleanup := func() {
			u.cfg.partPool.Put(part)
		}

		return bytes.NewReader(part[0:n]), n, cleanup, err
	}
}

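The reworked nextReader no longer returns the raw part buffer; it pairs every reader with a cleanup closure, so the caller (or the worker that eventually sends the chunk) decides when the buffer goes back to the pool. A generic sketch of that idiom (not from the diff):

package main

import (
	"fmt"
	"sync"
)

// borrow hands out a pooled buffer together with the closure that returns it,
// mirroring how nextReader now pairs each part reader with a cleanup func.
func borrow(pool *sync.Pool) ([]byte, func()) {
	buf := pool.Get().([]byte)
	return buf, func() { pool.Put(buf) }
}

func main() {
	pool := &sync.Pool{New: func() interface{} { return make([]byte, 8) }}

	buf, cleanup := borrow(pool)
	defer cleanup() // the caller controls when the buffer is recycled

	fmt.Println(len(buf))
}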
@ -479,10 +497,12 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
	defer cleanup()

	params := &s3.PutObjectInput{}
	awsutil.Copy(params, u.in)
	params.Body = buf
	params.Body = r

	// Need to use request form because URL generated in request is
	// used in return.
@ -512,9 +532,9 @@ type multiuploader struct {

// keeps track of a single chunk of data being sent to S3.
type chunk struct {
	buf io.ReadSeeker
	part []byte
	num int64
	buf     io.ReadSeeker
	num     int64
	cleanup func()
}

// completedParts is a wrapper to make parts sortable by their part number,
@ -527,7 +547,7 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa

// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

@ -547,46 +567,29 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, part: firstPart, num: num}
	ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}

	// Read and queue the rest of the parts
	for u.geterr() == nil && err == nil {
		var reader io.ReadSeeker
		var nextChunkLen int
		var part []byte
		reader, nextChunkLen, part, err = u.nextReader()
		var (
			reader       io.ReadSeeker
			nextChunkLen int
			ok           bool
		)

		if err != nil && err != io.EOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}

		if nextChunkLen == 0 {
			// No need to upload empty part, if file was empty to start
			// with empty single part would have been created and never
			// started multipart upload.
		reader, nextChunkLen, cleanup, err = u.nextReader()
		ok, err = u.shouldContinue(num, nextChunkLen, err)
		if !ok {
			cleanup()
			if err != nil {
				u.seterr(err)
			}
			break
		}

		num++
		// This upload exceeded maximum number of supported parts, error now.
		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.cfg.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.cfg.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}

		ch <- chunk{buf: reader, part: part, num: num}
		ch <- chunk{buf: reader, num: num, cleanup: cleanup}
	}

	// Close the channel, wait for workers, and complete upload
@ -620,6 +623,35 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
	}, nil
}

func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
	if err != nil && err != io.EOF {
		return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
	}

	if nextChunkLen == 0 {
		// No need to upload empty part, if file was empty to start
		// with empty single part would have been created and never
		// started multipart upload.
		return false, nil
	}

	part++
	// This upload exceeded maximum number of supported parts, error now.
	if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
		var msg string
		if part > int64(u.cfg.MaxUploadParts) {
			msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
				u.cfg.MaxUploadParts)
		} else {
			msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
				MaxUploadParts)
		}
		return false, awserr.New("TotalPartsExceeded", msg, nil)
	}

	return true, err
}

// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
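The shouldContinue check above is where the "Adjust PartSize to fit in this limit" errors come from. A quick illustrative calculation (not from the diff): the smallest PartSize that keeps an object under MaxUploadParts (10,000) is roughly ceil(size / MaxUploadParts), clamped to the 5 MiB minimum.

package main

import "fmt"

func main() {
	const maxParts = 10000                       // s3manager.MaxUploadParts
	const minPartSize = int64(1024 * 1024 * 5)   // DefaultUploadPartSize / S3 minimum
	size := int64(5) * 1024 * 1024 * 1024 * 1024 // hypothetical 5 TiB object

	partSize := (size + maxParts - 1) / maxParts // ceiling division
	if partSize < minPartSize {
		partSize = minPartSize
	}
	fmt.Printf("%d bytes (~%d MiB) per part\n", partSize, partSize/(1024*1024)) // ~524 MiB per part
}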
@ -651,9 +683,9 @@ func (u *multiuploader) send(c chunk) error {
		SSECustomerKey: u.in.SSECustomerKey,
		PartNumber:     &c.num,
	}

	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
	// put the byte array back into the pool to conserve memory
	u.bufferPool.Put(c.part)
	c.cleanup()
	if err != nil {
		return err
	}
@ -725,3 +757,18 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {

	return resp
}

type partPool struct {
	partSize int64
	sync.Pool
}

func newPartPool(partSize int64) *partPool {
	p := &partPool{partSize: partSize}

	p.New = func() interface{} {
		return make([]byte, p.partSize)
	}

	return p
}
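Because the part buffers now live in the uploader's partPool, they survive across Upload calls on the same Uploader as long as PartSize is unchanged (init above rebuilds the pool otherwise). A hedged usage sketch, not part of the diff (bucket and keys are hypothetical):

package main

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess) // construct once; part buffers are pooled

	for _, key := range []string{"a", "b", "c"} {
		// Each call draws its []byte part buffers from the shared partPool.
		_, _ = uploader.Upload(&s3manager.UploadInput{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String(key),
			Body:   strings.NewReader("payload"),
		})
	}
}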
2
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
generated
vendored
@ -12,7 +12,7 @@ import (
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
type UploadInput struct {
	_ struct{} `type:"structure" payload:"Body"`
	_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`

	// The canned ACL to apply to the object.
	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
75
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/writer_read_from.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
package s3manager

import (
	"bufio"
	"io"
	"sync"

	"github.com/aws/aws-sdk-go/internal/sdkio"
)

// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
type WriterReadFrom interface {
	io.Writer
	io.ReaderFrom
}

// WriterReadFromProvider provides an implementation of io.ReadFrom for the given io.Writer
type WriterReadFromProvider interface {
	GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
}

type bufferedWriter interface {
	WriterReadFrom
	Flush() error
	Reset(io.Writer)
}

type bufferedReadFrom struct {
	bufferedWriter
}

func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
	n, err := b.bufferedWriter.ReadFrom(r)
	if flushErr := b.Flush(); flushErr != nil && err == nil {
		err = flushErr
	}
	return n, err
}

// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
// to manage allocation and reuse of *bufio.Writer structures.
type PooledBufferedReadFromProvider struct {
	pool sync.Pool
}

// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider
// Size is used to control the size of the underlying *bufio.Writer created for
// calls to GetReadFrom.
func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
	if size < int(32*sdkio.KibiByte) {
		size = int(64 * sdkio.KibiByte)
	}

	return &PooledBufferedReadFromProvider{
		pool: sync.Pool{
			New: func() interface{} {
				return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
			},
		},
	}
}

// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
// interface. Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom
// has been completed in order to allow the reuse of the *bufio.Writer
func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
	buffer := p.pool.Get().(*bufferedReadFrom)
	buffer.Reset(writer)
	r = buffer
	cleanup = func() {
		buffer.Reset(nil) // Reset to nil writer to release reference
		p.pool.Put(buffer)
	}
	return r, cleanup
}
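A minimal sketch (not part of the diff) of the provider in isolation: GetReadFrom wraps any io.Writer, ReadFrom drains a reader through the pooled bufio.Writer (flushing at the end), and cleanup releases the wrapper back to the pool.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	provider := s3manager.NewPooledBufferedWriterReadFromProvider(0) // below 32 KiB, bumped to 64 KiB

	var dst bytes.Buffer
	w, cleanup := provider.GetReadFrom(&dst)

	n, err := w.ReadFrom(strings.NewReader("response body bytes"))
	cleanup() // must be called so the bufio.Writer can be reused

	fmt.Println(n, err, dst.String())
}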
5
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
@ -46,11 +46,11 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
	c := p.ClientConfig(EndpointsID, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
	svc := &S3{
		Client: client.New(
			cfg,
@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2006-03-01",
			},
6
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
@ -676,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
//
// Returned Error Codes:
//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
//   The error returned if the message passed to DecodeAuthorizationMessage was
//   invalid. This can happen if the token contains invalid characters, such as
//   linebreaks.
//   This error is returned if the message passed to DecodeAuthorizationMessage
//   was invalid. This can happen if the token contains invalid characters, such
//   as linebreaks.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
6
vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
generated
vendored
@ -34,9 +34,9 @@ const (
	// ErrCodeInvalidAuthorizationMessageException for service response error code
	// "InvalidAuthorizationMessageException".
	//
	// The error returned if the message passed to DecodeAuthorizationMessage was
	// invalid. This can happen if the token contains invalid characters, such as
	// linebreaks.
	// This error is returned if the message passed to DecodeAuthorizationMessage
	// was invalid. This can happen if the token contains invalid characters, such
	// as linebreaks.
	ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"

	// ErrCodeInvalidIdentityTokenException for service response error code
5
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
@ -46,11 +46,11 @@ const (
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
	c := p.ClientConfig(EndpointsID, cfgs...)
	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
	svc := &STS{
		Client: client.New(
			cfg,
@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
				ServiceID:     ServiceID,
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2011-06-15",
			},