Fix godeps
Signed-off-by: Olivier Gambier <olivier@docker.com>
parent 77e69b9cf3
commit 53e3c1d7b2

806 changed files with 431 additions and 1075412 deletions
134 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go generated vendored
@@ -1,134 +0,0 @@
// Package stscreds provides credential Providers to retrieve STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/service/sts"
)

// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"

// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}

// DefaultDuration is the default amount of time that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute

// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
	credentials.Expiry

	// STS client to make assume role request with.
	Client AssumeRoler

	// Role to be assumed.
	RoleARN string

	// Session name, if you wish to reuse the credentials elsewhere.
	RoleSessionName string

	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
	Duration time.Duration

	// Optional ExternalID to pass along, defaults to nil if not set.
	ExternalID *string

	// ExpiryWindow will allow the credentials to trigger refreshing prior to
	// the credentials actually expiring. This is beneficial so race conditions
	// with expiring credentials do not cause requests to fail unexpectedly
	// due to ExpiredTokenException exceptions.
	//
	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
	// 10 seconds before the credentials are actually expired.
	//
	// If ExpiryWindow is 0 or less it will be ignored.
	ExpiryWindow time.Duration
}

// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
	p := &AssumeRoleProvider{
		Client:   sts.New(c),
		RoleARN:  roleARN,
		Duration: DefaultDuration,
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfied by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
	p := &AssumeRoleProvider{
		Client:   svc,
		RoleARN:  roleARN,
		Duration: DefaultDuration,
	}

	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {

	// Apply defaults where parameters are not set.
	if p.RoleSessionName == "" {
		// Try to work out a role name that will hopefully end up unique.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as AWS permits.
		p.Duration = DefaultDuration
	}

	roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
		RoleArn:         aws.String(p.RoleARN),
		RoleSessionName: aws.String(p.RoleSessionName),
		ExternalId:      p.ExternalID,
	})

	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// We will proactively generate new credentials before they expire.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
		ProviderName:    ProviderName,
	}, nil
}
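For reference, a minimal sketch of how the provider above is typically wired up, assuming the same pre-1.0 aws-sdk-go layout this vendor tree snapshots; the role ARN and session name are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.New()

	// Wrap an AssumeRoleProvider in a credentials.Credentials object.
	// Functional options can set Duration, ExternalID, ExpiryWindow, etc.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.RoleSessionName = "example-session"
		})

	// Any service client built with these credentials assumes the role
	// lazily on first use and refreshes before the 15 minute expiry.
	svc := s3.New(sess, &aws.Config{Credentials: creds})
	fmt.Println(svc != nil)
}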
35 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go generated vendored
@@ -1,35 +0,0 @@
// Package ec2query provides serialisation of AWS EC2 requests and responses.
package ec2query

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// BuildHandler is a named request handler for building ec2query protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build}

// Build builds a request for the EC2 protocol.
func Build(r *request.Request) {
	body := url.Values{
		"Action":  {r.Operation.Name},
		"Version": {r.ClientInfo.APIVersion},
	}
	if err := queryutil.Parse(body, r.Params, true); err != nil {
		r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
	}

	if r.ExpireTime == 0 {
		r.HTTPRequest.Method = "POST"
		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
		r.SetBufferBody([]byte(body.Encode()))
	} else { // This is a pre-signed request
		r.HTTPRequest.Method = "GET"
		r.HTTPRequest.URL.RawQuery = body.Encode()
	}
}
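As a standalone illustration of the body that Build produces: the Action/Version pair plus the flattened parameters are form-encoded with net/url. The operation name and instance ID below are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Mirrors the body construction in Build above, without the SDK types.
	body := url.Values{
		"Action":  {"DescribeInstances"},
		"Version": {"2015-10-01"},
	}
	body.Set("InstanceId.1", "i-0123456789abcdef0")

	// For a normal request this becomes the POST body; for a pre-signed
	// request it becomes the URL's raw query string.
	fmt.Println(body.Encode())
	// Action=DescribeInstances&InstanceId.1=i-0123456789abcdef0&Version=2015-10-01
}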
63 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go generated vendored
@@ -1,63 +0,0 @@
package ec2query

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go

import (
	"encoding/xml"
	"io"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError}

// Unmarshal unmarshals a response body for the EC2 protocol.
func Unmarshal(r *request.Request) {
	defer r.HTTPResponse.Body.Close()
	if r.DataFilled() {
		decoder := xml.NewDecoder(r.HTTPResponse.Body)
		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
		if err != nil {
			r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
			return
		}
	}
}

// UnmarshalMeta unmarshals response headers for the EC2 protocol.
func UnmarshalMeta(r *request.Request) {
	// TODO implement unmarshaling of request IDs
}

type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestId"`
}

// UnmarshalError unmarshals a response error for the EC2 protocol.
func UnmarshalError(r *request.Request) {
	defer r.HTTPResponse.Body.Close()

	resp := &xmlErrorResponse{}
	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
	if err != nil && err != io.EOF {
		r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
	} else {
		r.Error = awserr.NewRequestFailure(
			awserr.New(resp.Code, resp.Message, nil),
			r.HTTPResponse.StatusCode,
			resp.RequestID,
		)
	}
}
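To see the nested xml struct tags above in action, a self-contained sketch that decodes a hypothetical EC2 error body; the type is a local copy of the unexported xmlErrorResponse from the deleted file:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Local copy of the unexported xmlErrorResponse type, so the
// "Errors>Error>..." tag paths can be exercised on their own.
type xmlErrorResponse struct {
	XMLName   xml.Name `xml:"Response"`
	Code      string   `xml:"Errors>Error>Code"`
	Message   string   `xml:"Errors>Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	// A hypothetical EC2 error body in the shape the tags above expect.
	const body = `<Response><Errors><Error>` +
		`<Code>InvalidInstanceID.NotFound</Code>` +
		`<Message>The instance ID 'i-0abc' does not exist</Message>` +
		`</Error></Errors><RequestId>ab12-cd34</RequestId></Response>`

	resp := &xmlErrorResponse{}
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, "-", resp.Message, "-", resp.RequestID)
}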
251 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go generated vendored
@@ -1,251 +0,0 @@
// Package jsonutil provides JSON serialisation of AWS requests and responses.
package jsonutil

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"reflect"
	"sort"
	"strconv"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

var timeType = reflect.ValueOf(time.Time{}).Type()
var byteSliceType = reflect.ValueOf([]byte{}).Type()

// BuildJSON builds a JSON string for a given object v.
func BuildJSON(v interface{}) ([]byte, error) {
	var buf bytes.Buffer

	err := buildAny(reflect.ValueOf(v), &buf, "")
	return buf.Bytes(), err
}

func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	value = reflect.Indirect(value)
	if !value.IsValid() {
		return nil
	}

	vtype := value.Type()

	t := tag.Get("type")
	if t == "" {
		switch vtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if value.Type() != timeType {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := value.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		if field, ok := vtype.FieldByName("_"); ok {
			tag = field.Tag
		}
		return buildStruct(value, buf, tag)
	case "list":
		return buildList(value, buf, tag)
	case "map":
		return buildMap(value, buf, tag)
	default:
		return buildScalar(value, buf, tag)
	}
}

func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	if !value.IsValid() {
		return nil
	}

	// unwrap payloads
	if payload := tag.Get("payload"); payload != "" {
		field, _ := value.Type().FieldByName(payload)
		tag = field.Tag
		value = elemOf(value.FieldByName(payload))

		if !value.IsValid() {
			return nil
		}
	}

	buf.WriteByte('{')

	t := value.Type()
	first := true
	for i := 0; i < t.NumField(); i++ {
		member := value.Field(i)
		field := t.Field(i)

		if field.PkgPath != "" {
			continue // ignore unexported fields
		}
		if field.Tag.Get("json") == "-" {
			continue
		}
		if field.Tag.Get("location") != "" {
			continue // ignore non-body elements
		}

		if protocol.CanSetIdempotencyToken(member, field) {
			token := protocol.GetIdempotencyToken()
			member = reflect.ValueOf(&token)
		}

		if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
			continue // ignore unset fields
		}

		if first {
			first = false
		} else {
			buf.WriteByte(',')
		}

		// figure out what this field is called
		name := field.Name
		if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		fmt.Fprintf(buf, "%q:", name)

		err := buildAny(member, buf, field.Tag)
		if err != nil {
			return err
		}

	}

	buf.WriteString("}")

	return nil
}

func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	buf.WriteString("[")

	for i := 0; i < value.Len(); i++ {
		buildAny(value.Index(i), buf, "")

		if i < value.Len()-1 {
			buf.WriteString(",")
		}
	}

	buf.WriteString("]")

	return nil
}

type sortedValues []reflect.Value

func (sv sortedValues) Len() int           { return len(sv) }
func (sv sortedValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }

func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	buf.WriteString("{")

	var sv sortedValues = value.MapKeys()
	sort.Sort(sv)

	for i, k := range sv {
		if i > 0 {
			buf.WriteByte(',')
		}

		fmt.Fprintf(buf, "%q:", k)
		buildAny(value.MapIndex(k), buf, "")
	}

	buf.WriteString("}")

	return nil
}

func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
	switch value.Kind() {
	case reflect.String:
		writeString(value.String(), buf)
	case reflect.Bool:
		buf.WriteString(strconv.FormatBool(value.Bool()))
	case reflect.Int64:
		buf.WriteString(strconv.FormatInt(value.Int(), 10))
	case reflect.Float64:
		buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64))
	default:
		switch value.Type() {
		case timeType:
			converted := value.Interface().(time.Time)
			buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10))
		case byteSliceType:
			if !value.IsNil() {
				converted := value.Interface().([]byte)
				buf.WriteByte('"')
				if len(converted) < 1024 {
					// for small buffers, using Encode directly is much faster.
					dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
					base64.StdEncoding.Encode(dst, converted)
					buf.Write(dst)
				} else {
					// for large buffers, avoid unnecessary extra temporary
					// buffer space.
					enc := base64.NewEncoder(base64.StdEncoding, buf)
					enc.Write(converted)
					enc.Close()
				}
				buf.WriteByte('"')
			}
		default:
			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
		}
	}
	return nil
}

func writeString(s string, buf *bytes.Buffer) {
	buf.WriteByte('"')
	for _, r := range s {
		if r == '"' {
			buf.WriteString(`\"`)
		} else if r == '\\' {
			buf.WriteString(`\\`)
		} else if r == '\b' {
			buf.WriteString(`\b`)
		} else if r == '\f' {
			buf.WriteString(`\f`)
		} else if r == '\r' {
			buf.WriteString(`\r`)
		} else if r == '\t' {
			buf.WriteString(`\t`)
		} else if r == '\n' {
			buf.WriteString(`\n`)
		} else if r < 32 {
			fmt.Fprintf(buf, "\\u%0.4x", r)
		} else {
			buf.WriteRune(r)
		}
	}
	buf.WriteByte('"')
}

// Returns the reflection element of a value, if it is a pointer.
func elemOf(value reflect.Value) reflect.Value {
	for value.Kind() == reflect.Ptr {
		value = value.Elem()
	}
	return value
}
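A small sketch of the tag-driven encoder above, assuming the same pre-1.0 private import path; the Shape type and its tags are hypothetical stand-ins for a generated API shape:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// Shape is a hypothetical API shape using the tags buildStruct understands.
type Shape struct {
	_     struct{} `type:"structure"`
	Name  *string  `locationName:"name" type:"string"`
	Count *int64   `locationName:"count" type:"integer"`
	Skip  *string  `location:"header"` // non-body members are ignored
}

func main() {
	name, count := "example", int64(3)
	b, err := jsonutil.BuildJSON(&Shape{Name: &name, Count: &count})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"example","count":3}
}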
213 vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go generated vendored
@@ -1,213 +0,0 @@
package jsonutil

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"time"
)

// UnmarshalJSON reads a stream and unmarshals the results in object v.
func UnmarshalJSON(v interface{}, stream io.Reader) error {
	var out interface{}

	b, err := ioutil.ReadAll(stream)
	if err != nil {
		return err
	}

	if len(b) == 0 {
		return nil
	}

	if err := json.Unmarshal(b, &out); err != nil {
		return err
	}

	return unmarshalAny(reflect.ValueOf(v), out, "")
}

func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	vtype := value.Type()
	if vtype.Kind() == reflect.Ptr {
		vtype = vtype.Elem() // check kind of actual element type
	}

	t := tag.Get("type")
	if t == "" {
		switch vtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if _, ok := value.Interface().(*time.Time); !ok {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := value.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		if field, ok := vtype.FieldByName("_"); ok {
			tag = field.Tag
		}
		return unmarshalStruct(value, data, tag)
	case "list":
		return unmarshalList(value, data, tag)
	case "map":
		return unmarshalMap(value, data, tag)
	default:
		return unmarshalScalar(value, data, tag)
	}
}

func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	if data == nil {
		return nil
	}
	mapData, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("JSON value is not a structure (%#v)", data)
	}

	t := value.Type()
	if value.Kind() == reflect.Ptr {
		if value.IsNil() { // create the structure if it's nil
			s := reflect.New(value.Type().Elem())
			value.Set(s)
			value = s
		}

		value = value.Elem()
		t = t.Elem()
	}

	// unwrap any payloads
	if payload := tag.Get("payload"); payload != "" {
		field, _ := t.FieldByName(payload)
		return unmarshalAny(value.FieldByName(payload), data, field.Tag)
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if field.PkgPath != "" {
			continue // ignore unexported fields
		}

		// figure out what this field is called
		name := field.Name
		if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		member := value.FieldByIndex(field.Index)
		err := unmarshalAny(member, mapData[name], field.Tag)
		if err != nil {
			return err
		}
	}
	return nil
}

func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	if data == nil {
		return nil
	}
	listData, ok := data.([]interface{})
	if !ok {
		return fmt.Errorf("JSON value is not a list (%#v)", data)
	}

	if value.IsNil() {
		l := len(listData)
		value.Set(reflect.MakeSlice(value.Type(), l, l))
	}

	for i, c := range listData {
		err := unmarshalAny(value.Index(i), c, "")
		if err != nil {
			return err
		}
	}

	return nil
}

func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	if data == nil {
		return nil
	}
	mapData, ok := data.(map[string]interface{})
	if !ok {
		return fmt.Errorf("JSON value is not a map (%#v)", data)
	}

	if value.IsNil() {
		value.Set(reflect.MakeMap(value.Type()))
	}

	for k, v := range mapData {
		kvalue := reflect.ValueOf(k)
		vvalue := reflect.New(value.Type().Elem()).Elem()

		unmarshalAny(vvalue, v, "")
		value.SetMapIndex(kvalue, vvalue)
	}

	return nil
}

func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
	errf := func() error {
		return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
	}

	switch d := data.(type) {
	case nil:
		return nil // nothing to do here
	case string:
		switch value.Interface().(type) {
		case *string:
			value.Set(reflect.ValueOf(&d))
		case []byte:
			b, err := base64.StdEncoding.DecodeString(d)
			if err != nil {
				return err
			}
			value.Set(reflect.ValueOf(b))
		default:
			return errf()
		}
	case float64:
		switch value.Interface().(type) {
		case *int64:
			di := int64(d)
			value.Set(reflect.ValueOf(&di))
		case *float64:
			value.Set(reflect.ValueOf(&d))
		case *time.Time:
			t := time.Unix(int64(d), 0).UTC()
			value.Set(reflect.ValueOf(&t))
		default:
			return errf()
		}
	case bool:
		switch value.Interface().(type) {
		case *bool:
			value.Set(reflect.ValueOf(&d))
		default:
			return errf()
		}
	default:
		return fmt.Errorf("unsupported JSON value (%v)", data)
	}
	return nil
}
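And the matching decode direction, reusing the same hypothetical Shape from the build sketch; note that JSON numbers arrive as float64 and are coerced by unmarshalScalar:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// Shape is the same hypothetical structure used in the build sketch above.
type Shape struct {
	_     struct{} `type:"structure"`
	Name  *string  `locationName:"name" type:"string"`
	Count *int64   `locationName:"count" type:"integer"`
}

func main() {
	out := &Shape{}
	// Strings land in *string fields; the number 3 arrives as float64 and
	// is converted to *int64 by unmarshalScalar.
	err := jsonutil.UnmarshalJSON(out, strings.NewReader(`{"name":"example","count":3}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(*out.Name, *out.Count) // example 3
}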
111 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go generated vendored
@@ -1,111 +0,0 @@
// Package jsonrpc provides JSON RPC utilities for serialisation of AWS
// requests and responses.
package jsonrpc

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go

import (
	"encoding/json"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

var emptyJSON = []byte("{}")

// BuildHandler is a named request handler for building jsonrpc protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}

// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}

// Build builds a JSON payload for a JSON RPC request.
func Build(req *request.Request) {
	var buf []byte
	var err error
	if req.ParamsFilled() {
		buf, err = jsonutil.BuildJSON(req.Params)
		if err != nil {
			req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err)
			return
		}
	} else {
		buf = emptyJSON
	}

	if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
		req.SetBufferBody(buf)
	}

	if req.ClientInfo.TargetPrefix != "" {
		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
		req.HTTPRequest.Header.Add("X-Amz-Target", target)
	}
	if req.ClientInfo.JSONVersion != "" {
		jsonVersion := req.ClientInfo.JSONVersion
		req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion)
	}
}

// Unmarshal unmarshals a response for a JSON RPC service.
func Unmarshal(req *request.Request) {
	defer req.HTTPResponse.Body.Close()
	if req.DataFilled() {
		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
		if err != nil {
			req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err)
		}
	}
	return
}

// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
func UnmarshalMeta(req *request.Request) {
	rest.UnmarshalMeta(req)
}

// UnmarshalError unmarshals an error response for a JSON RPC service.
func UnmarshalError(req *request.Request) {
	defer req.HTTPResponse.Body.Close()
	bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body)
	if err != nil {
		req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err)
		return
	}
	if len(bodyBytes) == 0 {
		req.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", req.HTTPResponse.Status, nil),
			req.HTTPResponse.StatusCode,
			"",
		)
		return
	}
	var jsonErr jsonErrorResponse
	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
		req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err)
		return
	}

	codes := strings.SplitN(jsonErr.Code, "#", 2)
	req.Error = awserr.NewRequestFailure(
		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
		req.HTTPResponse.StatusCode,
		req.RequestID,
	)
}

type jsonErrorResponse struct {
	Code    string `json:"__type"`
	Message string `json:"message"`
}
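The header logic in Build above can be illustrated without the SDK types; the target prefix, operation name, and JSON version here are hypothetical DynamoDB-style values:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Mirrors what Build sets on the outgoing request: an X-Amz-Target of
	// "<TargetPrefix>.<OperationName>" and a versioned JSON content type.
	req, _ := http.NewRequest("POST", "https://dynamodb.us-east-1.amazonaws.com/",
		strings.NewReader(`{"TableName":"example"}`))
	req.Header.Add("X-Amz-Target", "DynamoDB_20120810"+"."+"GetItem")
	req.Header.Add("Content-Type", "application/x-amz-json-"+"1.0")

	fmt.Println(req.Header.Get("X-Amz-Target")) // DynamoDB_20120810.GetItem
	fmt.Println(req.Header.Get("Content-Type")) // application/x-amz-json-1.0
}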
91 vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go generated vendored
@@ -1,91 +0,0 @@
// Package restjson provides RESTful JSON serialisation of AWS
// requests and responses.
package restjson

//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go

import (
	"encoding/json"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// BuildHandler is a named request handler for building restjson protocol requests
var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build}

// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests
var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal}

// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata
var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta}

// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError}

// Build builds a request for the REST JSON protocol.
func Build(r *request.Request) {
	rest.Build(r)

	if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
		jsonrpc.Build(r)
	}
}

// Unmarshal unmarshals a response body for the REST JSON protocol.
func Unmarshal(r *request.Request) {
	if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
		jsonrpc.Unmarshal(r)
	} else {
		rest.Unmarshal(r)
	}
}

// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
func UnmarshalMeta(r *request.Request) {
	rest.UnmarshalMeta(r)
}

// UnmarshalError unmarshals a response error for the REST JSON protocol.
func UnmarshalError(r *request.Request) {
	code := r.HTTPResponse.Header.Get("X-Amzn-Errortype")
	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
	if err != nil {
		r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err)
		return
	}
	if len(bodyBytes) == 0 {
		r.Error = awserr.NewRequestFailure(
			awserr.New("SerializationError", r.HTTPResponse.Status, nil),
			r.HTTPResponse.StatusCode,
			"",
		)
		return
	}
	var jsonErr jsonErrorResponse
	if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil {
		r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err)
		return
	}

	if code == "" {
		code = jsonErr.Code
	}

	code = strings.SplitN(code, ":", 2)[0]
	r.Error = awserr.NewRequestFailure(
		awserr.New(code, jsonErr.Message, nil),
		r.HTTPResponse.StatusCode,
		r.RequestID,
	)
}

type jsonErrorResponse struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
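The colon-splitting of the X-Amzn-Errortype header in UnmarshalError is easy to miss; a standalone sketch with a hypothetical header value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The X-Amzn-Errortype header can carry a suffix after a colon; as in
	// UnmarshalError above, only the leading error code is kept.
	header := "ResourceNotFoundException:http://internal.example/"
	code := strings.SplitN(header, ":", 2)[0]
	fmt.Println(code) // ResourceNotFoundException
}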
246 vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go generated vendored
@@ -1,246 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

// Package s3iface provides an interface for the Amazon Simple Storage Service.
package s3iface

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
)

// S3API is the interface type for s3.S3.
type S3API interface {
	AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)

	AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)

	CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)

	CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)

	CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)

	CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)

	CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)

	CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)

	CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)

	CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)

	DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)

	DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)

	DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)

	DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)

	DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)

	DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)

	DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)

	DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)

	DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)

	DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)

	DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)

	DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)

	DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)

	DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)

	DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)

	DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)

	DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)

	DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)

	GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)

	GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)

	GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)

	GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)

	GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)

	GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)

	GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)

	GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)

	GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)

	GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)

	GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)

	GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)

	GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)

	GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)

	GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)

	GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)

	GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)

	GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)

	GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)

	GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)

	GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)

	GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)

	GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)

	GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)

	GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)

	GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)

	GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)

	GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)

	GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)

	GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)

	GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)

	GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)

	GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)

	GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)

	HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)

	HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)

	HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)

	HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)

	ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)

	ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)

	ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)

	ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)

	ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error

	ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)

	ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)

	ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error

	ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)

	ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)

	ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error

	ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)

	ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)

	ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error

	PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)

	PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)

	PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)

	PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)

	PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)

	PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)

	PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)

	PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)

	PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)

	PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)

	PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)

	PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)

	PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)

	PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)

	PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)

	PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)

	PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)

	PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)

	PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)

	PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)

	PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)

	PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)

	PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)

	PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)

	PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)

	PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)

	PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)

	PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)

	PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)

	PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)

	RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)

	RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)

	UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)

	UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)

	UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)

	UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
}

var _ S3API = (*s3.S3)(nil)
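One common reason this interface existed is test doubles. A sketch, assuming the same SDK layout; mockS3 is hypothetical and only overrides what a test needs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// mockS3 is a hypothetical test double. Embedding s3iface.S3API satisfies
// the full interface; only the methods a test exercises need real bodies
// (calling an un-overridden method would panic on the nil embedded value).
type mockS3 struct {
	s3iface.S3API
}

func (m mockS3) ListBuckets(in *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) {
	name := "example-bucket"
	return &s3.ListBucketsOutput{Buckets: []*s3.Bucket{{Name: &name}}}, nil
}

func main() {
	var api s3iface.S3API = mockS3{}
	out, _ := api.ListBuckets(&s3.ListBucketsInput{})
	fmt.Println(*out.Buckets[0].Name)
}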
3 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go generated vendored
@@ -1,3 +0,0 @@
// Package s3manager provides utilities to upload and download objects from
// S3 concurrently. Helpful when working with large objects.
package s3manager
354 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go generated vendored
@@ -1,354 +0,0 @@
package s3manager

import (
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 5

// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
const DefaultDownloadConcurrency = 5

// The Downloader structure that calls Download(). It is safe to call Download()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Downloader's properties is not safe to be done concurrently.
type Downloader struct {
	// The size (in bytes) of the byte ranges to request when downloading
	// chunks of an object from S3. If this value is set to zero, the
	// DefaultDownloadPartSize value will be used.
	PartSize int64

	// The number of goroutines to spin up in parallel when fetching parts.
	// If this is set to zero, the DefaultDownloadConcurrency value will be used.
	Concurrency int

	// An S3 client to use when performing downloads.
	S3 s3iface.S3API
}

// NewDownloader creates a new Downloader instance to download objects from
// S3 in concurrent chunks. Pass in additional functional options to customize
// the downloader behavior. Requires a client.ConfigProvider in order to create
// an S3 service client. The session.Session satisfies the client.ConfigProvider
// interface.
//
// Example:
//	// The session the S3 Downloader will use
//	sess := session.New()
//
//	// Create a downloader with the session and default options
//	downloader := s3manager.NewDownloader(sess)
//
//	// Create a downloader with the session and custom options
//	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
//	})
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
	d := &Downloader{
		S3:          s3.New(c),
		PartSize:    DefaultDownloadPartSize,
		Concurrency: DefaultDownloadConcurrency,
	}
	for _, option := range options {
		option(d)
	}

	return d
}

// NewDownloaderWithClient creates a new Downloader instance to download
// objects from S3 in concurrent chunks. Pass in additional functional
// options to customize the downloader behavior. Requires an S3 service client
// to make S3 API calls.
//
// Example:
//	// The S3 client the S3 Downloader will use
//	s3Svc := s3.New(session.New())
//
//	// Create a downloader with the s3 client and default options
//	downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
//	// Create a downloader with the s3 client and custom options
//	downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
//	})
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
	d := &Downloader{
		S3:          svc,
		PartSize:    DefaultDownloadPartSize,
		Concurrency: DefaultDownloadConcurrency,
	}
	for _, option := range options {
		option(d)
	}

	return d
}

// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is
// called from. Modifying the options will not impact the original Downloader
// instance.
//
// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
	impl := downloader{w: w, in: input, ctx: d}

	for _, option := range options {
		option(&impl.ctx)
	}

	return impl.download()
}

// downloader is the implementation structure used internally by Downloader.
type downloader struct {
	ctx Downloader

	in *s3.GetObjectInput
	w  io.WriterAt

	wg sync.WaitGroup
	m  sync.Mutex

	pos        int64
	totalBytes int64
	written    int64
	err        error
}

// init initializes the downloader with default options.
func (d *downloader) init() {
	d.totalBytes = -1

	if d.ctx.Concurrency == 0 {
		d.ctx.Concurrency = DefaultDownloadConcurrency
	}

	if d.ctx.PartSize == 0 {
		d.ctx.PartSize = DefaultDownloadPartSize
	}
}

// download performs the implementation of the object download across ranged
// GETs.
func (d *downloader) download() (n int64, err error) {
	d.init()

	// Spin off first worker to check additional header information
	d.getChunk()

	if total := d.getTotalBytes(); total >= 0 {
		// Spin up workers
		ch := make(chan dlchunk, d.ctx.Concurrency)

		for i := 0; i < d.ctx.Concurrency; i++ {
			d.wg.Add(1)
			go d.downloadPart(ch)
		}

		// Assign work
		for d.getErr() == nil {
			if d.pos >= total {
				break // We're finished queueing chunks
			}

			// Queue the next range of bytes to read.
			ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
			d.pos += d.ctx.PartSize
		}

		// Wait for completion
		close(ch)
		d.wg.Wait()
	} else {
		// Checking if we read anything new
		for d.err == nil {
			d.getChunk()
		}

		// We expect a 416 error letting us know we are done downloading the
		// total bytes. Since we do not know the content's length, this will
		// keep grabbing chunks of data until the range of bytes specified in
		// the request is out of range of the content. Once this happens, a
		// 416 should occur.
		e, ok := d.err.(awserr.RequestFailure)
		if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
			d.err = nil
		}
	}

	// Return error
	return d.written, d.err
}

// downloadPart is an individual goroutine worker reading from the ch channel
// and performing a GetObject request on the data with a given byte range.
//
// If this is the first worker, this operation also resolves the total number
// of bytes to be read so that the worker manager knows when it is finished.
func (d *downloader) downloadPart(ch chan dlchunk) {
	defer d.wg.Done()
	for {
		chunk, ok := <-ch
		if !ok {
			break
		}
		d.downloadChunk(chunk)
	}
}

// getChunk grabs a chunk of data from the body.
// Not thread safe. Should only be used when grabbing data on a single thread.
func (d *downloader) getChunk() {
	chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
	d.pos += d.ctx.PartSize
	d.downloadChunk(chunk)
}

// downloadChunk downloads the chunk from S3.
func (d *downloader) downloadChunk(chunk dlchunk) {
	if d.getErr() != nil {
		return
	}
	// Get the next byte range of data
	in := &s3.GetObjectInput{}
	awsutil.Copy(in, d.in)
	rng := fmt.Sprintf("bytes=%d-%d",
		chunk.start, chunk.start+chunk.size-1)
	in.Range = &rng

	req, resp := d.ctx.S3.GetObjectRequest(in)
	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
	err := req.Send()

	if err != nil {
		d.setErr(err)
	} else {
		d.setTotalBytes(resp) // Set total if not yet set.

		n, err := io.Copy(&chunk, resp.Body)
		resp.Body.Close()

		if err != nil {
			d.setErr(err)
		}
		d.incrWritten(n)
	}
}

// getTotalBytes is a thread-safe getter for retrieving the total byte status.
func (d *downloader) getTotalBytes() int64 {
	d.m.Lock()
	defer d.m.Unlock()

	return d.totalBytes
}

// setTotalBytes is a thread-safe setter for setting the total byte status.
// Will extract the object's total bytes from the Content-Range if the file
// will be chunked, or Content-Length. Content-Length is used when the response
// does not include a Content-Range, meaning the object was not chunked. This
// occurs when the full file fits within the PartSize directive.
func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
	d.m.Lock()
	defer d.m.Unlock()

	if d.totalBytes >= 0 {
		return
	}

	if resp.ContentRange == nil {
		// ContentRange is nil when the full file contents are provided, and
		// the download is not chunked. Use ContentLength instead.
		if resp.ContentLength != nil {
			d.totalBytes = *resp.ContentLength
			return
		}
	} else {
		parts := strings.Split(*resp.ContentRange, "/")

		total := int64(-1)
		var err error
		// Checking for whether or not a numbered total exists
		// If one does not exist, we will assume the total to be -1, undefined,
		// and sequentially download each chunk until hitting a 416 error
		totalStr := parts[len(parts)-1]
		if totalStr != "*" {
			total, err = strconv.ParseInt(totalStr, 10, 64)
			if err != nil {
				d.err = err
				return
			}
		}

		d.totalBytes = total
	}
}

func (d *downloader) incrWritten(n int64) {
	d.m.Lock()
	defer d.m.Unlock()

	d.written += n
}

// getErr is a thread-safe getter for the error object
func (d *downloader) getErr() error {
	d.m.Lock()
	defer d.m.Unlock()

	return d.err
}

// setErr is a thread-safe setter for the error object
func (d *downloader) setErr(e error) {
	d.m.Lock()
	defer d.m.Unlock()

	d.err = e
}

// dlchunk represents a single chunk of data to write by the worker routine.
// This structure also implements an io.SectionReader style interface for
// io.WriterAt, effectively making it an io.SectionWriter (which does not
// exist).
type dlchunk struct {
	w     io.WriterAt
	start int64
	size  int64
	cur   int64
}

// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
// position to its end (or EOF).
func (c *dlchunk) Write(p []byte) (n int, err error) {
	if c.cur >= c.size {
		return 0, io.EOF
	}

	n, err = c.w.WriteAt(p, c.start+c.cur)
	c.cur += int64(n)

	return
}
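A minimal usage sketch for the Downloader above, assuming the same SDK layout; the bucket and key are hypothetical, and aws.WriteAtBuffer is the in-memory io.WriterAt the doc comment mentions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	downloader := s3manager.NewDownloader(session.New(), func(d *s3manager.Downloader) {
		d.PartSize = 10 * 1024 * 1024 // 10MB ranged GETs
	})

	// An in-memory target; an *os.File works the same way for large objects.
	buf := &aws.WriteAtBuffer{}
	n, err := downloader.Download(buf, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(n, "bytes downloaded")
}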
23
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go
generated
vendored
@@ -1,23 +0,0 @@
// Package s3manageriface provides an interface for the s3manager package.
package s3manageriface

import (
    "io"

    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// DownloaderAPI is the interface type for s3manager.Downloader.
type DownloaderAPI interface {
    Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
}

var _ DownloaderAPI = (*s3manager.Downloader)(nil)

// UploaderAPI is the interface type for s3manager.Uploader.
type UploaderAPI interface {
    Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
}

var _ UploaderAPI = (*s3manager.Uploader)(nil)
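
These interfaces exist so calling code can depend on the abstraction rather than the concrete manager, which makes test doubles straightforward. A hedged sketch, where mockUploader, publish, and the returned location are invented for illustration:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
)

// mockUploader satisfies s3manageriface.UploaderAPI without touching S3.
type mockUploader struct{}

func (mockUploader) Upload(in *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) {
    return &s3manager.UploadOutput{Location: "https://example.invalid/" + *in.Key}, nil
}

// publish depends only on the interface, so tests can inject mockUploader.
func publish(u s3manageriface.UploaderAPI, key string) (string, error) {
    out, err := u.Upload(&s3manager.UploadInput{Key: &key})
    if err != nil {
        return "", err
    }
    return out.Location, nil
}

func main() {
    loc, _ := publish(mockUploader{}, "report.txt")
    fmt.Println(loc)
}
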
661
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
@@ -1,661 +0,0 @@
package s3manager

import (
    "bytes"
    "fmt"
    "io"
    "sort"
    "sync"
    "time"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000

// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
const MinUploadPartSize int64 = 1024 * 1024 * 5

// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
const DefaultUploadPartSize = MinUploadPartSize

// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
const DefaultUploadConcurrency = 5

// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multipart upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
// Example:
//
//     u := s3manager.NewUploader(opts)
//     output, err := u.upload(input)
//     if err != nil {
//         if multierr, ok := err.(MultiUploadFailure); ok {
//             // Process error and its associated uploadID
//             fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
//         } else {
//             // Process error generically
//             fmt.Println("Error:", err.Error())
//         }
//     }
type MultiUploadFailure interface {
    awserr.Error

    // UploadID returns the upload id for the S3 multipart upload that failed.
    UploadID() string
}

// awsError aliases awserr.Error so that the Error interface type can be
// included as an anonymous field in the multiUploadError struct and not
// conflict with the error.Error() method.
type awsError awserr.Error

// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
// Composed of BaseError for code, message, and original error.
//
// Should be used for an error that occurred while failing an S3 multipart
// upload when an upload ID is available. If an upload ID is not available, a
// more relevant error type should be used instead.
type multiUploadError struct {
    awsError

    // ID for the multipart upload which failed.
    uploadID string
}

// Error returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format.
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
    extra := fmt.Sprintf("upload id: %s", m.uploadID)
    return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
    return m.Error()
}

// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
    return m.uploadID
}

// UploadInput contains all input for upload requests to Amazon S3.
type UploadInput struct {
    // The canned ACL to apply to the object.
    ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`

    Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

    // Specifies caching behavior along the request/reply chain.
    CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

    // Specifies presentational information for the object.
    ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

    // Specifies what content encodings have been applied to the object and thus
    // what decoding mechanisms must be applied to obtain the media-type referenced
    // by the Content-Type header field.
    ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

    // The language the content is in.
    ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

    // A standard MIME type describing the format of the object data.
    ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

    // The date and time at which the object is no longer cacheable.
    Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`

    // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
    GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`

    // Allows grantee to read the object data and its metadata.
    GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`

    // Allows grantee to read the object ACL.
    GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`

    // Allows grantee to write the ACL for the applicable object.
    GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`

    Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`

    // A map of metadata to store with the object in S3.
    Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

    // Confirms that the requester knows that she or he will be charged for the
    // request. Bucket owners need not specify this parameter in their requests.
    // Documentation on downloading objects from requester pays buckets can be found
    // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
    RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`

    // Specifies the algorithm to use when encrypting the object (e.g., AES256,
    // aws:kms).
    SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

    // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
    // data. This value is used to store the object and then it is discarded; Amazon
    // does not store the encryption key. The key must be appropriate for use with
    // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
    // header.
    SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`

    // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
    // Amazon S3 uses this header for a message integrity check to ensure the encryption
    // key was transmitted without error.
    SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

    // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
    // requests for an object protected by AWS KMS will fail if not made via SSL
    // or using SigV4. Documentation on configuring any of the officially supported
    // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
    SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`

    // The server-side encryption algorithm used when storing this object in S3
    // (e.g., AES256, aws:kms).
    ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`

    // The type of storage to use for the object. Defaults to 'STANDARD'.
    StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`

    // If the bucket is configured as a website, redirects requests for this object
    // to another object in the same bucket or to an external URL. Amazon S3 stores
    // the value of this header in the object metadata.
    WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`

    // The readable body payload to send to S3.
    Body io.Reader
}

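Because every field is a pointer, callers typically build inputs with the aws.String helper. A brief sketch, with placeholder bucket, key, and metadata values:

package main

import (
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func buildInput() *s3manager.UploadInput {
    return &s3manager.UploadInput{
        Bucket:      aws.String("my-bucket"),       // placeholder bucket
        Key:         aws.String("docs/report.txt"), // placeholder key
        ContentType: aws.String("text/plain"),
        Metadata: map[string]*string{
            "origin": aws.String("batch-job"), // illustrative metadata
        },
        Body: strings.NewReader("hello"),
    }
}

func main() { _ = buildInput() }
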
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
    // The URL where the object was uploaded to.
    Location string

    // The version of the object that was uploaded. Will only be populated if
    // the S3 Bucket is versioned. If the bucket is not versioned this field
    // will not be set.
    VersionID *string

    // The ID for a multipart upload to S3. In the case of an error the error
    // can be cast to the MultiUploadFailure interface to extract the upload ID.
    UploadID string
}

// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
type Uploader struct {
    // The buffer size (in bytes) to use when buffering data into chunks and
    // sending them as parts to S3. The minimum allowed part size is 5MB, and
    // if this value is set to zero, the DefaultUploadPartSize value will be used.
    PartSize int64

    // The number of goroutines to spin up in parallel when sending parts.
    // If this is set to zero, the DefaultUploadConcurrency value will be used.
    Concurrency int

    // Setting this value to true will cause the SDK to avoid calling
    // AbortMultipartUpload on a failure, leaving all successfully uploaded
    // parts on S3 for manual recovery.
    //
    // Note that storing parts of an incomplete multipart upload counts towards
    // space usage on S3 and will add additional costs if not cleaned up.
    LeavePartsOnError bool

    // MaxUploadParts is the max number of parts which will be uploaded to S3,
    // and is used to calculate the part size of the object to be uploaded.
    // E.g. a 5GB file with MaxUploadParts set to 100 will upload the file as
    // 100 parts of 50MB each, subject to the S3 limit of MaxUploadParts
    // (10,000 parts).
    MaxUploadParts int

    // The client to use when uploading to S3.
    S3 s3iface.S3API
}

// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires a
// client.ConfigProvider in order to create a S3 service client. The session.Session
// satisfies the client.ConfigProvider interface.
//
// Example:
//     // The session the S3 Uploader will use
//     sess := session.New()
//
//     // Create an uploader with the session and default options
//     uploader := s3manager.NewUploader(sess)
//
//     // Create an uploader with the session and custom options
//     uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
//         u.PartSize = 64 * 1024 * 1024 // 64MB per part
//     })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
    u := &Uploader{
        S3:                s3.New(c),
        PartSize:          DefaultUploadPartSize,
        Concurrency:       DefaultUploadConcurrency,
        LeavePartsOnError: false,
        MaxUploadParts:    MaxUploadParts,
    }

    for _, option := range options {
        option(u)
    }

    return u
}

// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires
// a S3 service client to make S3 API calls.
//
// Example:
//     // S3 service client the Upload manager will use.
//     s3Svc := s3.New(session.New())
//
//     // Create an uploader with S3 client and default options
//     uploader := s3manager.NewUploaderWithClient(s3Svc)
//
//     // Create an uploader with S3 client and custom options
//     uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
//         u.PartSize = 64 * 1024 * 1024 // 64MB per part
//     })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
    u := &Uploader{
        S3:                svc,
        PartSize:          DefaultUploadPartSize,
        Concurrency:       DefaultUploadConcurrency,
        LeavePartsOnError: false,
        MaxUploadParts:    MaxUploadParts,
    }

    for _, option := range options {
        option(u)
    }

    return u
}

// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
// can configure the buffer size and concurrency through the Uploader's parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// Example:
//     // Upload input parameters
//     upParams := &s3manager.UploadInput{
//         Bucket: &bucketName,
//         Key:    &keyName,
//         Body:   file,
//     }
//
//     // Perform an upload.
//     result, err := uploader.Upload(upParams)
//
//     // Perform an upload with options different from those in the Uploader.
//     result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
//         u.PartSize = 10 * 1024 * 1024 // 10MB part size
//         u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
//     })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
    i := uploader{in: input, ctx: u}

    for _, option := range options {
        option(&i.ctx)
    }

    return i.upload()
}

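Putting Upload together end to end, a hedged sketch assuming the session.New constructor from the SDK version vendored here; the bucket name, key, and file path are placeholders:

package main

import (
    "fmt"
    "os"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
    f, err := os.Open("/tmp/payload.bin") // placeholder file
    if err != nil {
        panic(err)
    }
    defer f.Close()

    uploader := s3manager.NewUploader(session.New())
    result, err := uploader.Upload(&s3manager.UploadInput{
        Bucket: aws.String("my-bucket"), // placeholder bucket
        Key:    aws.String("payload.bin"),
        Body:   f,
    })
    if err != nil {
        // A failed multipart upload can be inspected for its upload ID.
        if merr, ok := err.(s3manager.MultiUploadFailure); ok {
            fmt.Println("failed multipart upload, id:", merr.UploadID())
        }
        panic(err)
    }
    fmt.Println("uploaded to", result.Location)
}
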
// internal structure to manage an upload to S3.
type uploader struct {
    ctx Uploader

    in *UploadInput

    readerPos int64 // current reader position
    totalSize int64 // set to -1 if the size is not known
}

// internal logic for deciding whether to upload a single part or use a
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
    u.init()

    if u.ctx.PartSize < MinUploadPartSize {
        msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
        return nil, awserr.New("ConfigError", msg, nil)
    }

    // Do one read to determine if we have more than one part
    buf, err := u.nextReader()
    if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
        return u.singlePart(buf)
    } else if err != nil {
        return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
    }

    mu := multiuploader{uploader: u}
    return mu.upload(buf)
}

// init will initialize all default options.
func (u *uploader) init() {
    if u.ctx.Concurrency == 0 {
        u.ctx.Concurrency = DefaultUploadConcurrency
    }
    if u.ctx.PartSize == 0 {
        u.ctx.PartSize = DefaultUploadPartSize
    }

    // Try to get the total size for some optimizations
    u.initSize()
}

// initSize tries to detect the total stream size, setting u.totalSize. If
// the size is not known, totalSize is set to -1.
func (u *uploader) initSize() {
    u.totalSize = -1

    switch r := u.in.Body.(type) {
    case io.Seeker:
        pos, _ := r.Seek(0, 1) // whence 1: remember the current offset
        defer r.Seek(pos, 0)   // whence 0: restore it when done

        n, err := r.Seek(0, 2) // whence 2: seek to the end to learn the size
        if err != nil {
            return
        }
        u.totalSize = n

        // Try to adjust partSize if it is too small and account for
        // integer division truncation.
        if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
            // Add one to the part size to account for remainders
            // during the size calculation, e.g. an odd number of bytes.
            u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
        }
    }
}

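As a worked example of that adjustment: a 100 GiB body at the 5 MB default would need 20,480 parts, which exceeds the 10,000-part cap, so the part size is raised to totalSize/MaxUploadParts + 1. The same arithmetic in isolation:

package main

import "fmt"

func main() {
    const maxUploadParts = 10000
    const defaultPartSize = int64(1024 * 1024 * 5) // 5MB

    totalSize := int64(100) * 1024 * 1024 * 1024 // 100GiB body

    partSize := defaultPartSize
    if totalSize/partSize >= maxUploadParts {
        // +1 absorbs the remainder of the integer division.
        partSize = totalSize/maxUploadParts + 1
    }
    fmt.Println(partSize)                              // 10737419 (~10.2MB)
    fmt.Println((totalSize + partSize - 1) / partSize) // 10000 parts needed
}
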
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, error) {
    switch r := u.in.Body.(type) {
    case io.ReaderAt:
        var err error

        n := u.ctx.PartSize
        if u.totalSize >= 0 {
            bytesLeft := u.totalSize - u.readerPos

            if bytesLeft == 0 {
                err = io.EOF
                n = bytesLeft
            } else if bytesLeft <= u.ctx.PartSize {
                err = io.ErrUnexpectedEOF
                n = bytesLeft
            }
        }

        buf := io.NewSectionReader(r, u.readerPos, n)
        u.readerPos += n

        return buf, err

    default:
        packet := make([]byte, u.ctx.PartSize)
        n, err := io.ReadFull(u.in.Body, packet)
        u.readerPos += int64(n)

        return bytes.NewReader(packet[0:n]), err
    }
}

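The default branch leans on io.ReadFull's error contract, which upload() then uses to decide between single-part and multipart: io.ErrUnexpectedEOF signals a short final read, and io.EOF signals no data at all. A small demonstration:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    body := strings.NewReader("only 9B!!") // 9 bytes
    packet := make([]byte, 16)             // "part size" larger than the body

    n, err := io.ReadFull(body, packet)
    fmt.Println(n, err) // 9 unexpected EOF -> single-part upload

    n, err = io.ReadFull(body, packet)
    fmt.Println(n, err) // 0 EOF -> no more parts
}
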
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
    params := &s3.PutObjectInput{}
    awsutil.Copy(params, u.in)
    params.Body = buf

    req, out := u.ctx.S3.PutObjectRequest(params)
    req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
    if err := req.Send(); err != nil {
        return nil, err
    }

    url := req.HTTPRequest.URL.String()
    return &UploadOutput{
        Location:  url,
        VersionID: out.VersionId,
    }, nil
}

// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
    *uploader
    wg       sync.WaitGroup
    m        sync.Mutex
    err      error
    uploadID string
    parts    completedParts
}

// keeps track of a single chunk of data being sent to S3.
type chunk struct {
    buf io.ReadSeeker
    num int64
}

// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart

func (a completedParts) Len() int           { return len(a) }
func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }

// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
    params := &s3.CreateMultipartUploadInput{}
    awsutil.Copy(params, u.in)

    // Create the multipart
    req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
    req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
    if err := req.Send(); err != nil {
        return nil, err
    }
    u.uploadID = *resp.UploadId

    // Create the workers
    ch := make(chan chunk, u.ctx.Concurrency)
    for i := 0; i < u.ctx.Concurrency; i++ {
        u.wg.Add(1)
        go u.readChunk(ch)
    }

    // Send part 1 to the workers
    var num int64 = 1
    ch <- chunk{buf: firstBuf, num: num}

    // Read and queue the rest of the parts
    for u.geterr() == nil {
        // This upload exceeded the maximum number of supported parts; error now.
        if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
            var msg string
            if num > int64(u.ctx.MaxUploadParts) {
                msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
                    u.ctx.MaxUploadParts)
            } else {
                msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
                    MaxUploadParts)
            }
            u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
            break
        }
        num++

        buf, err := u.nextReader()
        if err == io.EOF {
            break
        }

        ch <- chunk{buf: buf, num: num}

        if err != nil && err != io.ErrUnexpectedEOF {
            u.seterr(awserr.New(
                "ReadRequestBody",
                "read multipart upload data failed",
                err))
            break
        }
    }

    // Close the channel, wait for workers, and complete upload
    close(ch)
    u.wg.Wait()
    complete := u.complete()

    if err := u.geterr(); err != nil {
        return nil, &multiUploadError{
            awsError: awserr.New(
                "MultipartUpload",
                "upload multipart failed",
                err),
            uploadID: u.uploadID,
        }
    }
    return &UploadOutput{
        Location:  *complete.Location,
        VersionID: complete.VersionId,
        UploadID:  u.uploadID,
    }, nil
}

// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
    defer u.wg.Done()
    for {
        data, ok := <-ch

        if !ok {
            break
        }

        if u.geterr() == nil {
            if err := u.send(data); err != nil {
                u.seterr(err)
            }
        }
    }
}

// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
    req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
        Bucket:     u.in.Bucket,
        Key:        u.in.Key,
        Body:       c.buf,
        UploadId:   &u.uploadID,
        PartNumber: &c.num,
    })
    req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
    if err := req.Send(); err != nil {
        return err
    }

    n := c.num
    completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}

    u.m.Lock()
    u.parts = append(u.parts, completed)
    u.m.Unlock()

    return nil
}

// geterr is a thread-safe getter for the error object.
func (u *multiuploader) geterr() error {
    u.m.Lock()
    defer u.m.Unlock()

    return u.err
}

// seterr is a thread-safe setter for the error object.
func (u *multiuploader) seterr(e error) {
    u.m.Lock()
    defer u.m.Unlock()

    u.err = e
}

// fail will abort the multipart upload unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
    if u.ctx.LeavePartsOnError {
        return
    }

    req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
        Bucket:   u.in.Bucket,
        Key:      u.in.Key,
        UploadId: &u.uploadID,
    })
    req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
    req.Send()
}

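When LeavePartsOnError is set (or an abort itself fails), orphaned parts continue to accrue storage costs until they are removed. A hedged cleanup sketch that lists and aborts outstanding multipart uploads; the bucket name is a placeholder and production code would check the abort errors:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    svc := s3.New(session.New())
    bucket := aws.String("my-bucket") // placeholder bucket

    out, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{Bucket: bucket})
    if err != nil {
        panic(err)
    }
    for _, up := range out.Uploads {
        fmt.Println("aborting", *up.Key, *up.UploadId)
        svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
            Bucket:   bucket,
            Key:      up.Key,
            UploadId: up.UploadId,
        })
    }
}
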
// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
    if u.geterr() != nil {
        u.fail()
        return nil
    }

    // Parts must be sorted in PartNumber order.
    sort.Sort(u.parts)

    req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
        Bucket:          u.in.Bucket,
        Key:             u.in.Key,
        UploadId:        &u.uploadID,
        MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
    })
    req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
    if err := req.Send(); err != nil {
        u.seterr(err)
        u.fail()
    }

    return resp
}