forked from TrueCloudLab/rclone

vendor: update to latest versions of everything

This commit is contained in: parent 4415aa5c2e, commit 467fe30a5e
276 changed files with 38996 additions and 14449 deletions
4  vendor/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored)
@@ -91,6 +91,6 @@ func (c *Client) AddDebugHandlers() {
 		return
 	}
 
-	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
-	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
 }
102  vendor/github.com/aws/aws-sdk-go/aws/client/logger.go (generated, vendored)
|
@ -44,12 +44,22 @@ func (reader *teeReaderCloser) Close() error {
|
|||
return reader.Source.Close()
|
||||
}
|
||||
|
||||
// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will include the HTTP request body if the LogLevel of the
|
||||
// request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPRequestHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequest",
|
||||
Fn: logRequest,
|
||||
}
|
||||
|
||||
func logRequest(r *request.Request) {
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
bodySeekable := aws.IsReaderSeekable(r.Body)
|
||||
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -63,7 +73,28 @@ func logRequest(r *request.Request) {
|
|||
r.ResetBody()
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will only log the HTTP request's headers. The request payload
|
||||
// will not be read.
|
||||
var LogHTTPRequestHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequestHeader",
|
||||
Fn: logRequestHeader,
|
||||
}
|
||||
|
||||
func logRequestHeader(r *request.Request) {
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
const logRespMsg = `DEBUG: Response %s/%s Details:
|
||||
|
@ -76,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
|
|||
%s
|
||||
-----------------------------------------------------`
|
||||
|
||||
// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
|
||||
// received from a service. Will include the HTTP response body if the LogLevel
|
||||
// of the request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPResponseHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponse",
|
||||
Fn: logResponse,
|
||||
}
|
||||
|
||||
func logResponse(r *request.Request) {
|
||||
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
|
||||
r.HTTPResponse.Body = &teeReaderCloser{
|
||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
||||
Source: r.HTTPResponse.Body,
|
||||
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
if logBody {
|
||||
r.HTTPResponse.Body = &teeReaderCloser{
|
||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
||||
Source: r.HTTPResponse.Body,
|
||||
}
|
||||
}
|
||||
|
||||
handlerFn := func(req *request.Request) {
|
||||
body, err := httputil.DumpResponse(req.HTTPResponse, false)
|
||||
b, err := httputil.DumpResponse(req.HTTPResponse, false)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(lw.buf)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
|
||||
if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
|
||||
|
||||
if logBody {
|
||||
b, err := ioutil.ReadAll(lw.buf)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
lw.Logger.Log(string(b))
|
||||
}
|
||||
}
|
||||
|
@ -110,3 +158,27 @@ func logResponse(r *request.Request) {
|
|||
Name: handlerName, Fn: handlerFn,
|
||||
})
|
||||
}
|
||||
|
||||
// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
|
||||
// response received from a service. Will only log the HTTP response's headers.
|
||||
// The response payload will not be read.
|
||||
var LogHTTPResponseHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponseHeader",
|
||||
Fn: logResponseHeader,
|
||||
}
|
||||
|
||||
func logResponseHeader(r *request.Request) {
|
||||
if r.Config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := httputil.DumpResponse(r.HTTPResponse, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
|
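Note: logger.go now exposes header-only logging handlers (LogHTTPRequestHeaderHandler / LogHTTPResponseHeaderHandler) next to the body-logging ones. The following is a minimal sketch, not part of the diff, of swapping the header-only response logger onto a single S3 request, mirroring the Swap call the S3 SelectObjectContent change further down performs; the bucket and key names are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Debug logging must be enabled for the log handlers to be registered.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithLogLevel(aws.LogDebugWithHTTPBody)))
	svc := s3.New(sess)

	// Build the request, then replace the body-logging response handler with
	// the header-only variant so the payload is not dumped into the log.
	req, out := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name,
		client.LogHTTPResponseHeaderHandler)

	if err := req.Send(); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer out.Body.Close()
	fmt.Println("content length:", aws.Int64Value(out.ContentLength))
}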
9  vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go (generated, vendored)
@@ -158,13 +158,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
 
 // IsExpired returns if the credentials are expired.
 func (e *Expiry) IsExpired() bool {
-	if e.CurrentTime == nil {
-		e.CurrentTime = time.Now
+	curTime := e.CurrentTime
+	if curTime == nil {
+		curTime = time.Now
 	}
-	return e.expiration.Before(e.CurrentTime())
+	return e.expiration.Before(curTime())
 }
 
-// A Credentials provides synchronous safe retrieval of AWS credentials Value.
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
 // Credentials will cache the credentials value until they expire. Once the value
 // expires the next Get will attempt to retrieve valid credentials.
 //
|
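Note: IsExpired now copies CurrentTime into a local before calling it, so the nil check no longer writes to shared state. For context, a minimal sketch (not from the diff) of a custom provider built on credentials.Expiry; staticExpiringProvider and its fixed value are hypothetical.

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// staticExpiringProvider is a hypothetical provider that hands out a fixed
// credential value and expires it after a configurable lifetime.
type staticExpiringProvider struct {
	credentials.Expiry
	value    credentials.Value
	lifetime time.Duration
}

func (p *staticExpiringProvider) Retrieve() (credentials.Value, error) {
	// SetExpiration records when IsExpired starts returning true; the window
	// argument expires the credentials that much earlier (0 = no window).
	p.SetExpiration(time.Now().Add(p.lifetime), 0)
	p.value.ProviderName = "StaticExpiringProvider"
	return p.value, nil
}

func main() {
	creds := credentials.NewCredentials(&staticExpiringProvider{
		value:    credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"},
		lifetime: 15 * time.Minute,
	})
	_, _ = creds.Get() // triggers Retrieve; cached until IsExpired reports true
}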
6  vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go (generated, vendored)
|
@ -4,7 +4,6 @@ import (
|
|||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -12,6 +11,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// ProviderName provides a name of EC2Role provider
|
||||
|
@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
|
|||
Message string
|
||||
}
|
||||
|
||||
const iamSecurityCredsPath = "/iam/security-credentials"
|
||||
const iamSecurityCredsPath = "iam/security-credentials/"
|
||||
|
||||
// requestCredList requests a list of credentials from the EC2 service.
|
||||
// If there are no credentials, or there is an error making or receiving the request
|
||||
|
@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
|||
// If the credentials cannot be found, or there is an error reading the response
|
||||
// and error will be returned.
|
||||
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||
resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
|
||||
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("EC2RoleRequestError",
|
||||
|
|
9  vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go (generated, vendored)
|
@ -90,14 +90,13 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
|
|||
}
|
||||
|
||||
func setError(m *metric, err awserr.Error) {
|
||||
msg := err.Message()
|
||||
msg := err.Error()
|
||||
code := err.Code()
|
||||
|
||||
switch code {
|
||||
case "RequestError",
|
||||
"SerializationError",
|
||||
request.CanceledErrorCode:
|
||||
|
||||
m.SDKException = &code
|
||||
m.SDKExceptionMessage = &msg
|
||||
default:
|
||||
|
@ -223,8 +222,10 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
|
|||
}
|
||||
|
||||
apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
|
||||
handlers.Complete.PushFrontNamed(apiCallHandler)
|
||||
|
||||
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
|
||||
|
||||
handlers.Complete.PushFrontNamed(apiCallHandler)
|
||||
handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
|
||||
|
||||
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
|
||||
}
|
||||
|
|
21  vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go (generated, vendored)
|
@ -92,14 +92,25 @@ func Handlers() request.Handlers {
|
|||
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
|
||||
return credentials.NewCredentials(&credentials.ChainProvider{
|
||||
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
||||
Providers: []credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
RemoteCredProvider(*cfg, handlers),
|
||||
},
|
||||
Providers: CredProviders(cfg, handlers),
|
||||
})
|
||||
}
|
||||
|
||||
// CredProviders returns the slice of providers used in
|
||||
// the default credential chain.
|
||||
//
|
||||
// For applications that need to use some other provider (for example use
|
||||
// different environment variables for legacy reasons) but still fall back
|
||||
// on the default chain of providers. This allows that default chaint to be
|
||||
// automatically updated
|
||||
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
|
||||
return []credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
RemoteCredProvider(*cfg, handlers),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
|
||||
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
|
||||
|
|
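Note: the new CredProviders exposes the default provider list so callers can extend it instead of re-declaring it, which is exactly the use case its doc comment describes. A hedged sketch of that pattern; legacyEnvProvider is hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/session"
)

// legacyEnvProvider is a hypothetical provider that would read credentials
// from non-standard environment variables used by an older deployment.
type legacyEnvProvider struct{}

func (p *legacyEnvProvider) Retrieve() (credentials.Value, error) {
	// Returning an error makes the chain fall through to the next provider.
	return credentials.Value{}, fmt.Errorf("legacy credentials not configured")
}

func (p *legacyEnvProvider) IsExpired() bool { return true }

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// Custom provider first, then the SDK's default chain (env vars, shared
	// credentials file, EC2/ECS role) as returned by the new CredProviders.
	providers := append([]credentials.Provider{&legacyEnvProvider{}},
		defaults.CredProviders(cfg, handlers)...)

	sess := session.Must(session.NewSession(&aws.Config{
		Credentials: credentials.NewChainCredentials(providers),
	}))
	_ = sess
}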
8  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (generated, vendored)
|
@ -4,12 +4,12 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// GetMetadata uses the path provided to request information from the EC2
|
||||
|
@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
|||
op := &request.Operation{
|
||||
Name: "GetMetadata",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: path.Join("/", "meta-data", p),
|
||||
HTTPPath: sdkuri.PathJoin("/meta-data", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
|
@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
|
|||
op := &request.Operation{
|
||||
Name: "GetUserData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: path.Join("/", "user-data"),
|
||||
HTTPPath: "/user-data",
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
|
@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
|
|||
op := &request.Operation{
|
||||
Name: "GetDynamicData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: path.Join("/", "dynamic", p),
|
||||
HTTPPath: sdkuri.PathJoin("/dynamic", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
|
|
22  vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go (generated, vendored)
|
@ -84,6 +84,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
|
|||
custAddEC2Metadata(p)
|
||||
custAddS3DualStack(p)
|
||||
custRmIotDataService(p)
|
||||
custFixAppAutoscalingChina(p)
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
|
@ -122,6 +123,27 @@ func custRmIotDataService(p *partition) {
|
|||
delete(p.Services, "data.iot")
|
||||
}
|
||||
|
||||
func custFixAppAutoscalingChina(p *partition) {
|
||||
if p.ID != "aws-cn" {
|
||||
return
|
||||
}
|
||||
|
||||
const serviceName = "application-autoscaling"
|
||||
s, ok := p.Services[serviceName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
const expectHostname = `autoscaling.{region}.amazonaws.com`
|
||||
if e, a := s.Defaults.Hostname, expectHostname; e != a {
|
||||
fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.Hostname = expectHostname + ".cn"
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
type decodeModelError struct {
|
||||
awsError
|
||||
}
|
||||
|
|
256  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored)
|
@ -50,6 +50,7 @@ const (
|
|||
AcmPcaServiceID = "acm-pca" // AcmPca.
|
||||
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
|
||||
ApiPricingServiceID = "api.pricing" // ApiPricing.
|
||||
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
|
||||
ApigatewayServiceID = "apigateway" // Apigateway.
|
||||
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
|
||||
Appstream2ServiceID = "appstream2" // Appstream2.
|
||||
|
@ -112,6 +113,7 @@ const (
|
|||
ImportexportServiceID = "importexport" // Importexport.
|
||||
InspectorServiceID = "inspector" // Inspector.
|
||||
IotServiceID = "iot" // Iot.
|
||||
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
|
||||
KinesisServiceID = "kinesis" // Kinesis.
|
||||
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
|
||||
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
|
||||
|
@ -146,7 +148,6 @@ const (
|
|||
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
|
||||
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
|
||||
S3ServiceID = "s3" // S3.
|
||||
SagemakerServiceID = "sagemaker" // Sagemaker.
|
||||
SdbServiceID = "sdb" // Sdb.
|
||||
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
|
||||
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
|
||||
|
@ -304,6 +305,7 @@ var awsPartition = partition{
|
|||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
|
@ -330,6 +332,19 @@ var awsPartition = partition{
|
|||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"apigateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -394,10 +409,13 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
|
@ -453,6 +471,7 @@ var awsPartition = partition{
|
|||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -571,6 +590,7 @@ var awsPartition = partition{
|
|||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -786,10 +806,11 @@ var awsPartition = partition{
|
|||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
@ -1029,11 +1050,17 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"fips": endpoint{
|
||||
Hostname: "elasticache-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticbeanstalk": service{
|
||||
|
@ -1059,7 +1086,9 @@ var awsPartition = partition{
|
|||
"elasticfilesystem": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
|
@ -1094,7 +1123,7 @@ var awsPartition = partition{
|
|||
"elasticmapreduce": service{
|
||||
Defaults: endpoint{
|
||||
SSLCommonName: "{region}.{service}.{dnsSuffix}",
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
|
@ -1193,10 +1222,16 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -1208,6 +1243,7 @@ var awsPartition = partition{
|
|||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
|
@ -1246,6 +1282,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
@ -1260,6 +1297,7 @@ var awsPartition = partition{
|
|||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
|
@ -1277,6 +1315,7 @@ var awsPartition = partition{
|
|||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
|
@ -1373,6 +1412,15 @@ var awsPartition = partition{
|
|||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"iotanalytics": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -1396,9 +1444,10 @@ var awsPartition = partition{
|
|||
"kinesisanalytics": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesisvideo": service{
|
||||
|
@ -1526,9 +1575,12 @@ var awsPartition = partition{
|
|||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
|
@ -1552,6 +1604,7 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
|
@ -1810,6 +1863,7 @@ var awsPartition = partition{
|
|||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
@ -1852,6 +1906,9 @@ var awsPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
@ -1917,16 +1974,6 @@ var awsPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"sdb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
|
@ -1957,11 +2004,36 @@ var awsPartition = partition{
|
|||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"serverlessrepo": service{
|
||||
|
@ -2036,10 +2108,16 @@ var awsPartition = partition{
|
|||
"servicediscovery": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"shield": service{
|
||||
|
@ -2129,11 +2207,31 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"fips-us-east-1": endpoint{},
|
||||
"fips-us-east-2": endpoint{},
|
||||
"fips-us-west-1": endpoint{},
|
||||
"fips-us-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "sqs-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "sqs-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-1": endpoint{
|
||||
Hostname: "sqs-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "sqs-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{
|
||||
SSLCommonName: "queue.{dnsSuffix}",
|
||||
},
|
||||
|
@ -2167,6 +2265,7 @@ var awsPartition = partition{
|
|||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
|
@ -2340,8 +2439,26 @@ var awsPartition = partition{
|
|||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "translate-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "translate-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "translate-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"waf": service{
|
||||
|
@ -2461,12 +2578,13 @@ var awscnPartition = partition{
|
|||
"apigateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
Hostname: "autoscaling.{region}.amazonaws.com.cn",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "application-autoscaling",
|
||||
|
@ -2527,6 +2645,13 @@ var awscnPartition = partition{
|
|||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
|
@ -2595,7 +2720,7 @@ var awscnPartition = partition{
|
|||
},
|
||||
"elasticmapreduce": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
|
@ -2657,7 +2782,8 @@ var awscnPartition = partition{
|
|||
"lambda": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"logs": service{
|
||||
|
@ -2819,6 +2945,12 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -2923,6 +3055,12 @@ var awsusgovPartition = partition{
|
|||
"elasticache": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips": endpoint{
|
||||
Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
|
@ -2944,7 +3082,7 @@ var awsusgovPartition = partition{
|
|||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -2981,6 +3119,22 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"inspector": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"iot": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
Service: "execute-api",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -3097,6 +3251,12 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"storagegateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
@ -3137,5 +3297,19 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"translate": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
"us-gov-west-1-fips": endpoint{
|
||||
Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
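Note: the new "fips" / "*-fips" entries above are modeled as extra endpoint keys, so they resolve like pseudo-regions through the normal resolver. A sketch, assuming the public endpoints resolver treats these entries like any other explicit endpoint:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve the FIPS pseudo-region added for Secrets Manager.
	ep, err := endpoints.DefaultResolver().EndpointFor(
		endpoints.SecretsmanagerServiceID, "us-east-1-fips")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL)           // expected: https://secretsmanager-fips.us-east-1.amazonaws.com
	fmt.Println(ep.SigningRegion) // expected: us-east-1, from the CredentialScope above
}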
2  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go (generated, vendored)
@@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
 	}
 
 	if t, ok := err.(temporaryError); ok {
-		return t.Temporary()
+		return t.Temporary() || isErrConnectionReset(err)
 	}
 
 	return isErrConnectionReset(err)
|
2  vendor/github.com/aws/aws-sdk-go/aws/session/doc.go (generated, vendored)
@@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared
 credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
 
 Credentials are the values the SDK should use for authenticating requests with
-AWS Services. They arfrom a configuration file will need to include both
+AWS Services. They are from a configuration file will need to include both
 aws_access_key_id and aws_secret_access_key must be provided together in the
 same file to be considered valid. The values will be ignored if not a complete
 group. aws_session_token is an optional field that can be provided if both of
|
10  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go (generated, vendored)
@@ -739,7 +739,15 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
 	start, _ := reader.Seek(0, sdkio.SeekCurrent)
 	defer reader.Seek(start, sdkio.SeekStart)
 
-	io.Copy(hash, reader)
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
 
 	return hash.Sum(nil)
 }
|
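Note: the signer now sizes the hash copy with aws.SeekerLen instead of an unconditional io.Copy. A small sketch of that helper on its own, using only the standard library and the aws package:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	r := bytes.NewReader([]byte("hello world"))
	// SeekerLen reports the number of bytes remaining from the current
	// offset without consuming the reader.
	n, err := aws.SeekerLen(r)
	if err != nil {
		fmt.Println("could not determine length:", err)
		return
	}
	fmt.Println("bytes remaining:", n) // 11
}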
2  vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.14.8"
+const SDKVersion = "1.15.39"
|
23  vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+	"path"
+	"strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+
+	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+	str := path.Join(elems...)
+	if hasTrailing && str != "/" {
+		str += "/"
+	}
+
+	return str
+}
|
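Note: PathJoin differs from path.Join only in preserving a trailing "/", which the metadata credentials path above (iam/security-credentials/) relies on. The package is internal to the SDK, so this sketch inlines an equivalent helper to show the behaviour:

package main

import (
	"fmt"
	"path"
	"strings"
)

// pathJoin mirrors the SDK's internal sdkuri.PathJoin: path.Join semantics,
// except a trailing "/" on the last element is kept.
func pathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	str := path.Join(elems...)
	if hasTrailing && str != "/" {
		str += "/"
	}
	return str
}

func main() {
	fmt.Println(path.Join("/meta-data", "iam/security-credentials/")) // /meta-data/iam/security-credentials
	fmt.Println(pathJoin("/meta-data", "iam/security-credentials/"))  // /meta-data/iam/security-credentials/
}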
36  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go (generated, vendored)
|
@ -94,6 +94,9 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) {
|
|||
switch typ {
|
||||
case EventMessageType:
|
||||
return r.unmarshalEventMessage(msg)
|
||||
case ExceptionMessageType:
|
||||
err = r.unmarshalEventException(msg)
|
||||
return nil, err
|
||||
case ErrorMessageType:
|
||||
return nil, r.unmarshalErrorMessage(msg)
|
||||
default:
|
||||
|
@ -122,6 +125,39 @@ func (r *EventReader) unmarshalEventMessage(
|
|||
return ev, nil
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalEventException(
|
||||
msg eventstream.Message,
|
||||
) (err error) {
|
||||
eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ev, err := r.unmarshalerForEventType(eventType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
err, ok = ev.(error)
|
||||
if !ok {
|
||||
err = messageError{
|
||||
code: "SerializationError",
|
||||
msg: fmt.Sprintf(
|
||||
"event stream exception %s mapped to non-error %T, %v",
|
||||
eventType, ev, ev,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
|
||||
var msgErr messageError
|
||||
|
||||
|
|
2  vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go (generated, vendored)
@@ -464,7 +464,7 @@ func (v *TimestampValue) decode(r io.Reader) error {
 func timeFromEpochMilli(t int64) time.Time {
 	secs := t / 1e3
 	msec := t % 1e3
-	return time.Unix(secs, msec*int64(time.Millisecond))
+	return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
 }
 
 // An UUIDValue provides eventstream encoding, and representation of a UUID
|
7  vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go (generated, vendored)
|
@ -233,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
|
|||
v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
v.Set(name, value.UTC().Format(ISO8601UTC))
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
|
||||
v.Set(name, protocol.FormatTime(format, value))
|
||||
default:
|
||||
return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
|
||||
}
|
||||
|
|
6  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go (generated, vendored)
|
@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) {
|
|||
decoder := xml.NewDecoder(r.HTTPResponse.Body)
|
||||
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed decoding Query response", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
14  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go (generated, vendored)
|
@ -28,7 +28,11 @@ func UnmarshalError(r *request.Request) {
|
|||
|
||||
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to read from query HTTP response body", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -61,6 +65,10 @@ func UnmarshalError(r *request.Request) {
|
|||
}
|
||||
|
||||
// Failed to retrieve any error message from the response body
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to decode query XML error response", decodeErr)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError",
|
||||
"failed to decode query XML error response", decodeErr),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
}
|
||||
|
|
14  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (generated, vendored)
|
@ -20,11 +20,6 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// RFC1123GMT is a RFC1123 (RFC822) formated timestame. This format is not
|
||||
// using the standard library's time.RFC1123 due to the desire to always use
|
||||
// GMT as the timezone.
|
||||
const RFC1123GMT = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// Whether the byte value can be sent without escaping in AWS URLs
|
||||
var noEscape [256]bool
|
||||
|
||||
|
@ -272,7 +267,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
|
|||
case float64:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
case time.Time:
|
||||
str = value.UTC().Format(RFC1123GMT)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.RFC822TimeFormatName
|
||||
if tag.Get("location") == "querystring" {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
}
|
||||
str = protocol.FormatTime(format, value)
|
||||
case aws.JSONValue:
|
||||
if len(value) == 0 {
|
||||
return "", errValueNotSet
|
||||
|
|
6  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go (generated, vendored)
|
@ -198,7 +198,11 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
|
|||
}
|
||||
v.Set(reflect.ValueOf(&f))
|
||||
case *time.Time:
|
||||
t, err := time.Parse(time.RFC1123, header)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.RFC822TimeFormatName
|
||||
}
|
||||
t, err := protocol.ParseTime(format, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
12  vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go (generated, vendored)
|
@ -36,7 +36,11 @@ func Build(r *request.Request) {
|
|||
var buf bytes.Buffer
|
||||
err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to encode rest XML request", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
r.SetBufferBody(buf.Bytes())
|
||||
|
@ -50,7 +54,11 @@ func Unmarshal(r *request.Request) {
|
|||
decoder := xml.NewDecoder(r.HTTPResponse.Body)
|
||||
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to decode REST XML response", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
|
|
72  vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go (generated, vendored, new file)
|
@ -0,0 +1,72 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Names of time formats supported by the SDK
|
||||
const (
|
||||
RFC822TimeFormatName = "rfc822"
|
||||
ISO8601TimeFormatName = "iso8601"
|
||||
UnixTimeFormatName = "unixTimestamp"
|
||||
)
|
||||
|
||||
// Time formats supported by the SDK
|
||||
const (
|
||||
// RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
|
||||
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
|
||||
)
|
||||
|
||||
// IsKnownTimestampFormat returns if the timestamp format name
|
||||
// is know to the SDK's protocols.
|
||||
func IsKnownTimestampFormat(name string) bool {
|
||||
switch name {
|
||||
case RFC822TimeFormatName:
|
||||
fallthrough
|
||||
case ISO8601TimeFormatName:
|
||||
fallthrough
|
||||
case UnixTimeFormatName:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// FormatTime returns a string value of the time.
|
||||
func FormatTime(name string, t time.Time) string {
|
||||
t = t.UTC()
|
||||
|
||||
switch name {
|
||||
case RFC822TimeFormatName:
|
||||
return t.Format(RFC822TimeFormat)
|
||||
case ISO8601TimeFormatName:
|
||||
return t.Format(ISO8601TimeFormat)
|
||||
case UnixTimeFormatName:
|
||||
return strconv.FormatInt(t.Unix(), 10)
|
||||
default:
|
||||
panic("unknown timestamp format name, " + name)
|
||||
}
|
||||
}
|
||||
|
||||
// ParseTime attempts to parse the time given the format. Returns
|
||||
// the time if it was able to be parsed, and fails otherwise.
|
||||
func ParseTime(formatName, value string) (time.Time, error) {
|
||||
switch formatName {
|
||||
case RFC822TimeFormatName:
|
||||
return time.Parse(RFC822TimeFormat, value)
|
||||
case ISO8601TimeFormatName:
|
||||
return time.Parse(ISO8601TimeFormat, value)
|
||||
case UnixTimeFormatName:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return time.Unix(int64(v), 0), nil
|
||||
default:
|
||||
panic("unknown timestamp format name, " + formatName)
|
||||
}
|
||||
}
|
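Note: a short round trip through the new FormatTime/ParseTime helpers, assuming only the format names declared above and the github.com/aws/aws-sdk-go/private/protocol import path:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	for _, name := range []string{
		protocol.RFC822TimeFormatName,  // "Tue, 29 Apr 2014 18:30:38 GMT"
		protocol.ISO8601TimeFormatName, // "2014-04-29T18:30:38Z"
		protocol.UnixTimeFormatName,    // "1398796238"
	} {
		s := protocol.FormatTime(name, t)
		parsed, err := protocol.ParseTime(name, s)
		if err != nil {
			fmt.Println(name, "parse error:", err)
			continue
		}
		fmt.Printf("%-13s %-30s round-trips to %s\n", name, s, parsed.UTC())
	}
}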
30  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go (generated, vendored)
|
@ -13,9 +13,13 @@ import (
|
|||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// BuildXML will serialize params into an xml.Encoder.
|
||||
// Error will be returned if the serialization of any of the params or nested values fails.
|
||||
// BuildXML will serialize params into an xml.Encoder. Error will be returned
|
||||
// if the serialization of any of the params or nested values fails.
|
||||
func BuildXML(params interface{}, e *xml.Encoder) error {
|
||||
return buildXML(params, e, false)
|
||||
}
|
||||
|
||||
func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
|
||||
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
|
||||
root := NewXMLElement(xml.Name{})
|
||||
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
|
||||
|
@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
|
|||
}
|
||||
for _, c := range root.Children {
|
||||
for _, v := range c {
|
||||
return StructToXML(e, v, false)
|
||||
return StructToXML(e, v, sorted)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -90,8 +94,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
|||
return nil
|
||||
}
|
||||
|
||||
fieldAdded := false
|
||||
|
||||
// unwrap payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := value.Type().FieldByName(payload)
|
||||
|
@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
|||
child.Attr = append(child.Attr, ns)
|
||||
}
|
||||
|
||||
var payloadFields, nonPayloadFields int
|
||||
|
||||
t := value.Type()
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
member := elemOf(value.Field(i))
|
||||
|
@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
|||
|
||||
mTag := field.Tag
|
||||
if mTag.Get("location") != "" { // skip non-body members
|
||||
nonPayloadFields++
|
||||
continue
|
||||
}
|
||||
payloadFields++
|
||||
|
||||
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
|
||||
token := protocol.GetIdempotencyToken()
|
||||
|
@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
|||
if err := b.buildValue(member, child, mTag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fieldAdded = true
|
||||
}
|
||||
|
||||
if fieldAdded { // only append this child if we have one ore more valid members
|
||||
// Only case where the child shape is not added is if the shape only contains
|
||||
// non-payload fields, e.g headers/query.
|
||||
if !(payloadFields == 0 && nonPayloadFields > 0) {
|
||||
current.AddChild(child)
|
||||
}
|
||||
|
||||
|
@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
|
|||
case float32:
|
||||
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
str = converted.UTC().Format(ISO8601UTC)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
|
||||
str = protocol.FormatTime(format, converted)
|
||||
default:
|
||||
return fmt.Errorf("unsupported value for param %s: %v (%s)",
|
||||
tag.Get("locationName"), value.Interface(), value.Type().Name())
|
||||
|
|
10  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go (generated, vendored)
|
@ -9,6 +9,8 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// UnmarshalXML deserializes an xml.Decoder into the container v. V
|
||||
|
@ -253,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
|||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
t, err := time.Parse(ISO8601UTC, node.Text)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
|
||||
t, err := protocol.ParseTime(format, node.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
1  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go (generated, vendored)
@@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode {
 
 // AddChild adds child to the XMLNode.
 func (n *XMLNode) AddChild(child *XMLNode) {
+	child.parent = n
 	if _, ok := n.Children[child.Name.Local]; !ok {
 		n.Children[child.Name.Local] = []*XMLNode{}
 	}
|
|
426  vendor/github.com/aws/aws-sdk-go/service/s3/api.go (generated, vendored)
|
@ -11,7 +11,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/eventstream"
|
||||
|
@ -1956,6 +1958,8 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
|
|||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
|
||||
//
|
||||
// Deprecated: GetBucketLifecycle has been deprecated
|
||||
func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
|
||||
|
@ -1986,6 +1990,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
|
|||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation GetBucketLifecycle for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
|
||||
//
|
||||
// Deprecated: GetBucketLifecycle has been deprecated
|
||||
func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
|
||||
req, out := c.GetBucketLifecycleRequest(input)
|
||||
return out, req.Send()
|
||||
|
@ -2000,6 +2006,8 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec
|
|||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: GetBucketLifecycleWithContext has been deprecated
|
||||
func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
|
||||
req, out := c.GetBucketLifecycleRequest(input)
|
||||
req.SetContext(ctx)
|
||||
|
@ -2331,6 +2339,8 @@ const opGetBucketNotification = "GetBucketNotification"
|
|||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
|
||||
//
|
||||
// Deprecated: GetBucketNotification has been deprecated
|
||||
func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
|
||||
|
@ -2361,6 +2371,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
|
|||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation GetBucketNotification for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
|
||||
//
|
||||
// Deprecated: GetBucketNotification has been deprecated
|
||||
func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
|
||||
req, out := c.GetBucketNotificationRequest(input)
|
||||
return out, req.Send()
|
||||
|
@ -2375,6 +2387,8 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ
|
|||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: GetBucketNotificationWithContext has been deprecated
|
||||
func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
|
||||
req, out := c.GetBucketNotificationRequest(input)
|
||||
req.SetContext(ctx)
|
||||
|
@ -4813,6 +4827,8 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
|
|||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
|
||||
//
|
||||
// Deprecated: PutBucketLifecycle has been deprecated
|
||||
func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
|
||||
|
@ -4845,6 +4861,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
|
|||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation PutBucketLifecycle for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
|
||||
//
|
||||
// Deprecated: PutBucketLifecycle has been deprecated
|
||||
func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
|
||||
req, out := c.PutBucketLifecycleRequest(input)
|
||||
return out, req.Send()
|
||||
|
@ -4859,6 +4877,8 @@ func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifec
|
|||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: PutBucketLifecycleWithContext has been deprecated
|
||||
func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
|
||||
req, out := c.PutBucketLifecycleRequest(input)
|
||||
req.SetContext(ctx)
|
||||
|
@ -5124,6 +5144,8 @@ const opPutBucketNotification = "PutBucketNotification"
|
|||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
|
||||
//
|
||||
// Deprecated: PutBucketNotification has been deprecated
|
||||
func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
|
||||
|
@ -5156,6 +5178,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
|
|||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation PutBucketNotification for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
|
||||
//
|
||||
// Deprecated: PutBucketNotification has been deprecated
|
||||
func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
|
||||
req, out := c.PutBucketNotificationRequest(input)
|
||||
return out, req.Send()
|
||||
|
@ -5170,6 +5194,8 @@ func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucke
|
|||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: PutBucketNotificationWithContext has been deprecated
|
||||
func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
|
||||
req, out := c.PutBucketNotificationRequest(input)
|
||||
req.SetContext(ctx)
|
||||
|
@ -5377,7 +5403,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
|
|||
// PutBucketReplication API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Creates a new replication configuration (or replaces an existing one, if
|
||||
// present).
|
||||
// present). For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
@ -6062,6 +6089,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
|
|||
|
||||
output = &SelectObjectContentOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
|
||||
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
|
||||
req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop)
|
||||
return
|
||||
|
@ -6817,7 +6845,7 @@ type Bucket struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date the bucket was created.
|
||||
CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
CreationDate *time.Time `type:"timestamp"`
|
||||
|
||||
// The name of the bucket.
|
||||
Name *string `type:"string"`
|
||||
|
@ -7064,6 +7092,11 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
|
|||
type CSVInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies that CSV field values may contain quoted record delimiters and
|
||||
// such records should be allowed. Default value is FALSE. Setting this value
|
||||
// to TRUE may lower performance.
|
||||
AllowQuotedRecordDelimiter *bool `type:"boolean"`
|
||||
|
||||
// Single character used to indicate a row should be ignored when present at
|
||||
// the start of a row.
|
||||
Comments *string `type:"string"`
|
||||
|
@ -7095,6 +7128,12 @@ func (s CSVInput) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
|
||||
func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
|
||||
s.AllowQuotedRecordDelimiter = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetComments sets the Comments field's value.
|
||||
func (s *CSVInput) SetComments(v string) *CSVInput {
|
||||
s.Comments = &v
|
||||
|
@ -7198,6 +7237,8 @@ type CloudFunctionConfiguration struct {
|
|||
CloudFunction *string `type:"string"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
|
@ -7562,7 +7603,7 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
|
|||
}
|
||||
|
||||
type ContinuationEvent struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ContinuationEvent" type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -7623,14 +7664,14 @@ type CopyObjectInput struct {
|
|||
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
|
||||
|
||||
// Copies the object if it has been modified since the specified time.
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
|
||||
|
||||
// Copies the object if its entity tag (ETag) is different than the specified
|
||||
// ETag.
|
||||
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
|
||||
|
||||
// Copies the object if it hasn't been modified since the specified time.
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
|
||||
|
||||
// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
|
||||
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
@ -7646,7 +7687,7 @@ type CopyObjectInput struct {
|
|||
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
|
@ -8075,7 +8116,7 @@ type CopyObjectResult struct {
|
|||
|
||||
ETag *string `type:"string"`
|
||||
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -8107,7 +8148,7 @@ type CopyPartResult struct {
|
|||
ETag *string `type:"string"`
|
||||
|
||||
// Date and time at which the object was uploaded.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -8311,7 +8352,7 @@ type CreateMultipartUploadInput struct {
|
|||
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
|
@ -8559,7 +8600,7 @@ type CreateMultipartUploadOutput struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date when multipart upload will become eligible for abort operation by lifecycle.
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
|
||||
|
||||
// Id of the lifecycle rule that makes a multipart upload eligible for abort
|
||||
// operation.
|
||||
|
@ -9240,6 +9281,14 @@ func (s DeleteBucketPolicyOutput) GoString() string {
|
|||
type DeleteBucketReplicationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Deletes the replication subresource associated with the specified bucket.
|
||||
//
|
||||
// There is usually some time lag before replication configuration deletion
|
||||
// is fully propagated to all the Amazon S3 systems.
|
||||
//
|
||||
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
//
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
}
|
||||
|
@ -9419,7 +9468,7 @@ type DeleteMarkerEntry struct {
|
|||
Key *string `min:"1" type:"string"`
|
||||
|
||||
// Date and time the object was last modified.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
|
@ -9467,6 +9516,33 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
|
|||
return s
|
||||
}
|
||||
|
||||
// Specifies whether Amazon S3 should replicate delete markers.
|
||||
type DeleteMarkerReplication struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The status of the delete marker replication.
|
||||
//
|
||||
// In the current implementation, Amazon S3 does not replicate the delete markers.
|
||||
// Therefore, the status must be Disabled.
|
||||
Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s DeleteMarkerReplication) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s DeleteMarkerReplication) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetStatus sets the Status field's value.
|
||||
func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
|
||||
s.Status = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type DeleteObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
@ -9863,19 +9939,34 @@ type Destination struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for information regarding the access control for replicas.
|
||||
//
|
||||
// Use only in a cross-account scenario, where source and destination bucket
|
||||
// owners are not the same, when you want to change replica ownership to the
|
||||
// AWS account that owns the destination bucket. If you don't add this element
|
||||
// to the replication configuration, the replicas are owned by same AWS account
|
||||
// that owns the source object.
|
||||
AccessControlTranslation *AccessControlTranslation `type:"structure"`
|
||||
|
||||
// Account ID of the destination bucket. Currently this is only being verified
|
||||
// if Access Control Translation is enabled
|
||||
// Account ID of the destination bucket. Currently Amazon S3 verifies this value
|
||||
// only if Access Control Translation is enabled.
|
||||
//
|
||||
// In a cross-account scenario, if you tell Amazon S3 to change replica ownership
|
||||
// to the AWS account that owns the destination bucket by adding the AccessControlTranslation
|
||||
// element, this is the account ID of the destination bucket owner.
|
||||
Account *string `type:"string"`
|
||||
|
||||
// Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
|
||||
// replicas of the object identified by the rule.
|
||||
//
|
||||
// If you have multiple rules in your replication configuration, all rules must
|
||||
// specify the same bucket as the destination. A replication configuration can
|
||||
// replicate objects only to one destination bucket.
|
||||
//
|
||||
// Bucket is a required field
|
||||
Bucket *string `type:"string" required:"true"`
|
||||
|
||||
// Container for information regarding encryption based configuration for replicas.
|
||||
// Container that provides encryption-related information. You must specify
|
||||
// this element if the SourceSelectionCriteria is specified.
|
||||
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
|
@ -10012,7 +10103,8 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
|
|||
type EncryptionConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The id of the KMS key used to encrypt the replica object.
|
||||
// The ID of the AWS KMS key for the region where the destination bucket resides.
|
||||
// Amazon S3 uses this key to encrypt the replica object.
|
||||
ReplicaKmsKeyID *string `type:"string"`
|
||||
}
|
||||
|
||||
|
@ -10033,7 +10125,7 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig
|
|||
}
|
||||
|
||||
type EndEvent struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"EndEvent" type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -10153,6 +10245,7 @@ type FilterRule struct {
|
|||
// the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
|
||||
// Overlapping prefixes and suffixes are not supported. For more information,
|
||||
// go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Name *string `type:"string" enum:"FilterRuleName"`
|
||||
|
||||
Value *string `type:"string"`
|
||||
|
@ -11574,7 +11667,7 @@ type GetObjectInput struct {
|
|||
|
||||
// Return the object only if it has been modified since the specified time,
|
||||
// otherwise return a 304 (not modified).
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
|
||||
|
||||
// Return the object only if its entity tag (ETag) is different from the one
|
||||
// specified, otherwise return a 304 (not modified).
|
||||
|
@ -11582,7 +11675,7 @@ type GetObjectInput struct {
|
|||
|
||||
// Return the object only if it has not been modified since the specified time,
|
||||
// otherwise return a 412 (precondition failed).
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
|
||||
|
||||
// Key is a required field
|
||||
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
|
||||
|
@ -11618,7 +11711,7 @@ type GetObjectInput struct {
|
|||
ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
|
||||
|
||||
// Sets the Expires header of the response.
|
||||
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
|
||||
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
|
||||
|
||||
// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
@ -11845,7 +11938,7 @@ type GetObjectOutput struct {
|
|||
Expires *string `location:"header" locationName:"Expires" type:"string"`
|
||||
|
||||
// Last modified date of the object
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
|
||||
|
||||
// A map of metadata to store with the object in S3.
|
||||
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
|
||||
|
@ -12505,7 +12598,7 @@ type HeadObjectInput struct {
|
|||
|
||||
// Return the object only if it has been modified since the specified time,
|
||||
// otherwise return a 304 (not modified).
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
|
||||
|
||||
// Return the object only if its entity tag (ETag) is different from the one
|
||||
// specified, otherwise return a 304 (not modified).
|
||||
|
@ -12513,7 +12606,7 @@ type HeadObjectInput struct {
|
|||
|
||||
// Return the object only if it has not been modified since the specified time,
|
||||
// otherwise return a 412 (precondition failed).
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
|
||||
|
||||
// Key is a required field
|
||||
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
|
||||
|
@ -12717,7 +12810,7 @@ type HeadObjectOutput struct {
|
|||
Expires *string `location:"header" locationName:"Expires" type:"string"`
|
||||
|
||||
// Last modified date of the object
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
|
||||
|
||||
// A map of metadata to store with the object in S3.
|
||||
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
|
||||
|
@ -13011,12 +13104,15 @@ type InputSerialization struct {
|
|||
// Describes the serialization of a CSV-encoded object.
|
||||
CSV *CSVInput `type:"structure"`
|
||||
|
||||
// Specifies object's compression format. Valid values: NONE, GZIP. Default
|
||||
// Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
|
||||
// Value: NONE.
|
||||
CompressionType *string `type:"string" enum:"CompressionType"`
|
||||
|
||||
// Specifies JSON as object's input serialization format.
|
||||
JSON *JSONInput `type:"structure"`
|
||||
|
||||
// Specifies Parquet as object's input serialization format.
|
||||
Parquet *ParquetInput `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
|
@ -13047,6 +13143,12 @@ func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
|
|||
return s
|
||||
}
|
||||
|
||||
// SetParquet sets the Parquet field's value.
|
||||
func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
|
||||
s.Parquet = v
|
||||
return s
|
||||
}
|
||||
|
||||
type InventoryConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
@ -13517,6 +13619,7 @@ type LambdaFunctionConfiguration struct {
|
|||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
|
@ -13699,6 +13802,8 @@ type LifecycleRule struct {
|
|||
|
||||
// Prefix identifying one or more objects to which the rule applies. This is
|
||||
// deprecated; use Filter instead.
|
||||
//
|
||||
// Deprecated: Prefix has been deprecated
|
||||
Prefix *string `deprecated:"true" type:"string"`
|
||||
|
||||
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
|
||||
|
@ -15334,7 +15439,7 @@ type ListPartsOutput struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date when multipart upload will become eligible for abort operation by lifecycle.
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
|
||||
|
||||
// Id of the lifecycle rule that makes a multipart upload eligible for abort
|
||||
// operation.
|
||||
|
@ -15890,7 +15995,7 @@ type MultipartUpload struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date and time at which the multipart upload was initiated.
|
||||
Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
Initiated *time.Time `type:"timestamp"`
|
||||
|
||||
// Identifies who initiated the multipart upload.
|
||||
Initiator *Initiator `type:"structure"`
|
||||
|
@ -15964,7 +16069,8 @@ type NoncurrentVersionExpiration struct {
|
|||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
}
|
||||
|
||||
|
@ -15996,7 +16102,8 @@ type NoncurrentVersionTransition struct {
|
|||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
|
@ -16145,6 +16252,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
|
|||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
type NotificationConfigurationFilter struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
@ -16175,7 +16283,7 @@ type Object struct {
|
|||
|
||||
Key *string `min:"1" type:"string"`
|
||||
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
|
@ -16294,7 +16402,7 @@ type ObjectVersion struct {
|
|||
Key *string `min:"1" type:"string"`
|
||||
|
||||
// Date and time the object was last modified.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
|
@ -16468,6 +16576,20 @@ func (s *Owner) SetID(v string) *Owner {
|
|||
return s
|
||||
}
|
||||
|
||||
type ParquetInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ParquetInput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ParquetInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
type Part struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
@ -16475,7 +16597,7 @@ type Part struct {
|
|||
ETag *string `type:"string"`
|
||||
|
||||
// Date and time at which the part was uploaded.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
// Part number identifying the part. This is a positive integer between 1 and
|
||||
// 10,000.
|
||||
|
@ -16561,7 +16683,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress {
|
|||
}
|
||||
|
||||
type ProgressEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Details"`
|
||||
_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
|
||||
|
||||
// The Progress event details.
|
||||
Details *Progress `locationName:"Details" type:"structure"`
|
||||
|
@ -16595,7 +16717,7 @@ func (s *ProgressEvent) UnmarshalEvent(
|
|||
if err := payloadUnmarshaler.UnmarshalPayload(
|
||||
bytes.NewReader(msg.Payload), s,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal payload, %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -18257,7 +18379,7 @@ type PutObjectInput struct {
|
|||
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
|
@ -18730,6 +18852,7 @@ type QueueConfiguration struct {
|
|||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
|
@ -18797,6 +18920,8 @@ type QueueConfigurationDeprecated struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
|
@ -18843,7 +18968,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep
|
|||
}
|
||||
|
||||
type RecordsEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Payload"`
|
||||
_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
|
||||
|
||||
// The byte array of partial, one or more result records.
|
||||
//
|
||||
|
@ -19008,8 +19133,8 @@ type ReplicationConfiguration struct {
|
|||
// Role is a required field
|
||||
Role *string `type:"string" required:"true"`
|
||||
|
||||
// Container for information about a particular replication rule. Replication
|
||||
// configuration must have at least one rule and can contain up to 1,000 rules.
|
||||
// Container for one or more replication rules. Replication configuration must
|
||||
// have at least one rule and can contain up to 1,000 rules.
|
||||
//
|
||||
// Rules is a required field
|
||||
Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
|
||||
|
@ -19067,22 +19192,50 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
|
|||
type ReplicationRule struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies whether Amazon S3 should replicate delete markers.
|
||||
DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
|
||||
|
||||
// Container for replication destination information.
|
||||
//
|
||||
// Destination is a required field
|
||||
Destination *Destination `type:"structure" required:"true"`
|
||||
|
||||
// Filter that identifies subset of objects to which the replication rule applies.
|
||||
// A Filter must specify exactly one Prefix, Tag, or an And child element.
|
||||
Filter *ReplicationRuleFilter `type:"structure"`
|
||||
|
||||
// Unique identifier for the rule. The value cannot be longer than 255 characters.
|
||||
ID *string `type:"string"`
|
||||
|
||||
// Object keyname prefix identifying one or more objects to which the rule applies.
|
||||
// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
|
||||
// are not supported.
|
||||
// Maximum prefix length can be up to 1,024 characters.
|
||||
//
|
||||
// Prefix is a required field
|
||||
Prefix *string `type:"string" required:"true"`
|
||||
// Deprecated: Prefix has been deprecated
|
||||
Prefix *string `deprecated:"true" type:"string"`
|
||||
|
||||
// Container for filters that define which source objects should be replicated.
|
||||
// The priority associated with the rule. If you specify multiple rules in a
|
||||
// replication configuration, then Amazon S3 applies rule priority in the event
|
||||
// there are conflicts (two or more rules identify the same object based on
|
||||
// filter specified). The rule with higher priority takes precedence. For example,
|
||||
//
|
||||
// * The same object qualifies under the prefix-based filter criteria in
|
||||
// multiple rules because the prefixes you specified overlap.
|
||||
//
|
||||
// * The same object qualifies under the tag-based filter criteria specified
|
||||
// in multiple rules.
|
||||
//
|
||||
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
Priority *int64 `type:"integer"`
|
||||
|
||||
// Container that describes additional filters in identifying source objects
|
||||
// that you want to replicate. Currently, Amazon S3 supports only the filter
|
||||
// that you can specify for objects created with server-side encryption using
|
||||
// an AWS KMS-managed key. You can choose to enable or disable replication of
|
||||
// these objects.
|
||||
//
|
||||
// if you want Amazon S3 to replicate objects created with server-side encryption
|
||||
// using AWS KMS-managed keys.
|
||||
SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
|
||||
|
||||
// The rule is ignored if status is not Enabled.
|
||||
|
@ -19107,9 +19260,6 @@ func (s *ReplicationRule) Validate() error {
|
|||
if s.Destination == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Destination"))
|
||||
}
|
||||
if s.Prefix == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Prefix"))
|
||||
}
|
||||
if s.Status == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Status"))
|
||||
}
|
||||
|
@ -19118,6 +19268,11 @@ func (s *ReplicationRule) Validate() error {
|
|||
invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.Filter != nil {
|
||||
if err := s.Filter.Validate(); err != nil {
|
||||
invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.SourceSelectionCriteria != nil {
|
||||
if err := s.SourceSelectionCriteria.Validate(); err != nil {
|
||||
invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
|
||||
|
@ -19130,12 +19285,24 @@ func (s *ReplicationRule) Validate() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
|
||||
func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule {
|
||||
s.DeleteMarkerReplication = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetDestination sets the Destination field's value.
|
||||
func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
|
||||
s.Destination = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetFilter sets the Filter field's value.
|
||||
func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
|
||||
s.Filter = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetID sets the ID field's value.
|
||||
func (s *ReplicationRule) SetID(v string) *ReplicationRule {
|
||||
s.ID = &v
|
||||
|
@ -19148,6 +19315,12 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
|
|||
return s
|
||||
}
|
||||
|
||||
// SetPriority sets the Priority field's value.
|
||||
func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule {
|
||||
s.Priority = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value.
|
||||
func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule {
|
||||
s.SourceSelectionCriteria = v
|
||||
|
@ -19160,6 +19333,130 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
|
|||
return s
|
||||
}
|
||||
|
||||
type ReplicationRuleAndOperator struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
Prefix *string `type:"string"`
|
||||
|
||||
Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ReplicationRuleAndOperator) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ReplicationRuleAndOperator) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *ReplicationRuleAndOperator) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
|
||||
if s.Tags != nil {
|
||||
for i, v := range s.Tags {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPrefix sets the Prefix field's value.
|
||||
func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
|
||||
s.Prefix = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTags sets the Tags field's value.
|
||||
func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
|
||||
s.Tags = v
|
||||
return s
|
||||
}
|
||||
|
||||
// Filter that identifies subset of objects to which the replication rule applies.
|
||||
// A Filter must specify exactly one Prefix, Tag, or an And child element.
|
||||
type ReplicationRuleFilter struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for specifying rule filters. These filters determine the subset
|
||||
// of objects to which the rule applies. The element is required only if you
|
||||
// specify more than one filter. For example:
|
||||
//
|
||||
// * You specify both a Prefix and a Tag filter. Then you wrap these in
|
||||
// an And tag.
|
||||
//
|
||||
// * You specify a filter based on multiple tags. Then you wrap the Tag elements
|
||||
// in an And tag.
|
||||
And *ReplicationRuleAndOperator `type:"structure"`
|
||||
|
||||
// Object keyname prefix that identifies subset of objects to which the rule
|
||||
// applies.
|
||||
Prefix *string `type:"string"`
|
||||
|
||||
// Container for specifying a tag key and value.
|
||||
//
|
||||
// The rule applies only to objects that have the tag in their tag set.
|
||||
Tag *Tag `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ReplicationRuleFilter) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ReplicationRuleFilter) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *ReplicationRuleFilter) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"}
|
||||
if s.And != nil {
|
||||
if err := s.And.Validate(); err != nil {
|
||||
invalidParams.AddNested("And", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.Tag != nil {
|
||||
if err := s.Tag.Validate(); err != nil {
|
||||
invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAnd sets the And field's value.
|
||||
func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter {
|
||||
s.And = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetPrefix sets the Prefix field's value.
|
||||
func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter {
|
||||
s.Prefix = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTag sets the Tag field's value.
|
||||
func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
|
||||
s.Tag = v
|
||||
return s
|
||||
}
|
||||
|
||||
type RequestPaymentConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
@ -19887,8 +20184,11 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
|
|||
case "Stats":
|
||||
return &StatsEvent{}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(
|
||||
"unknown event type name, %s, for SelectObjectContentEventStream", eventType)
|
||||
return nil, awserr.New(
|
||||
request.ErrCodeSerialization,
|
||||
fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -19898,7 +20198,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
|
|||
// Amazon S3 uses this to parse object data into records, and returns only records
|
||||
// that match the specified SQL expression. You must also specify the data serialization
|
||||
// format for the response. For more information, go to S3Select API Documentation
|
||||
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
|
||||
type SelectObjectContentInput struct {
|
||||
_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
|
||||
|
||||
|
@ -19936,15 +20236,15 @@ type SelectObjectContentInput struct {
|
|||
RequestProgress *RequestProgress `type:"structure"`
|
||||
|
||||
// The SSE Algorithm used to encrypt the object. For more information, go to
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
||||
// The SSE Customer Key. For more information, go to Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
|
||||
|
||||
// The SSE Customer Key MD5. For more information, go to Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
}
|
||||
|
||||
|
@ -20335,6 +20635,8 @@ type SourceSelectionCriteria struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for filter information of selection of KMS Encrypted S3 objects.
|
||||
// The element is required if you include SourceSelectionCriteria in the replication
|
||||
// configuration.
|
||||
SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
|
||||
}
|
||||
|
||||
|
@ -20451,7 +20753,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats {
|
|||
}
|
||||
|
||||
type StatsEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Details"`
|
||||
_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
|
||||
|
||||
// The Stats event details.
|
||||
Details *Stats `locationName:"Details" type:"structure"`
|
||||
|
@ -20485,7 +20787,7 @@ func (s *StatsEvent) UnmarshalEvent(
|
|||
if err := payloadUnmarshaler.UnmarshalPayload(
|
||||
bytes.NewReader(msg.Payload), s,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal payload, %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -20743,6 +21045,7 @@ type TopicConfiguration struct {
|
|||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
|
@ -20810,6 +21113,8 @@ type TopicConfigurationDeprecated struct {
|
|||
_ struct{} `type:"structure"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
|
@ -20916,14 +21221,14 @@ type UploadPartCopyInput struct {
|
|||
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
|
||||
|
||||
// Copies the object if it has been modified since the specified time.
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
|
||||
|
||||
// Copies the object if its entity tag (ETag) is different than the specified
|
||||
// ETag.
|
||||
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
|
||||
|
||||
// Copies the object if it hasn't been modified since the specified time.
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
|
||||
|
||||
// The range of bytes to copy from the source object. The range value must use
|
||||
// the form bytes=first-last, where the first and last are the zero-based byte
|
||||
|
@ -21676,6 +21981,17 @@ const (
|
|||
|
||||
// CompressionTypeGzip is a CompressionType enum value
|
||||
CompressionTypeGzip = "GZIP"
|
||||
|
||||
// CompressionTypeBzip2 is a CompressionType enum value
|
||||
CompressionTypeBzip2 = "BZIP2"
|
||||
)
|
||||
|
||||
const (
|
||||
// DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
|
||||
DeleteMarkerReplicationStatusEnabled = "Enabled"
|
||||
|
||||
// DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
|
||||
DeleteMarkerReplicationStatusDisabled = "Disabled"
|
||||
)
|
||||
|
||||
// Requests Amazon S3 to encode the object keys in the response and specifies
|
||||
|
|
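The ReplicationRule type above now carries Filter, Priority and DeleteMarkerReplication, with the top-level Prefix marked deprecated. Below is a minimal, hedged sketch of assembling such a rule against this SDK version; the bucket names, rule ID, and IAM role ARN are placeholders rather than values from this change, and the PutBucketReplicationInput wiring is assumed from the standard generated API.

// Sketch only: bucket names, the rule ID, and the IAM role ARN are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	rule := &s3.ReplicationRule{
		ID:       aws.String("replicate-docs"),
		Priority: aws.Int64(1),
		Status:   aws.String(s3.ReplicationRuleStatusEnabled),
		// Filter replaces the deprecated top-level Prefix.
		Filter: &s3.ReplicationRuleFilter{
			Prefix: aws.String("docs/"),
		},
		// Delete markers are not replicated in the current implementation,
		// so the status must be Disabled.
		DeleteMarkerReplication: &s3.DeleteMarkerReplication{
			Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
		},
		Destination: &s3.Destination{
			Bucket: aws.String("arn:aws:s3:::destination-bucket"),
		},
	}

	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"),
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role:  aws.String("arn:aws:iam::123456789012:role/replication-role"),
			Rules: []*s3.ReplicationRule{rule},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}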
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
generated
vendored
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
generated
vendored
|
@ -338,6 +338,11 @@ func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
|
|||
}
|
||||
}
|
||||
|
||||
// iter.Next() could return false (above) plus populate iter.Err()
|
||||
if iter.Err() != nil {
|
||||
errs = append(errs, newError(iter.Err(), nil, nil))
|
||||
}
|
||||
|
||||
if input != nil && len(input.Delete.Objects) > 0 {
|
||||
if err := deleteBatch(ctx, d, input, objects); err != nil {
|
||||
errs = append(errs, err...)
|
||||
|
|
14
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
generated
vendored
14
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
generated
vendored
|
@ -39,6 +39,8 @@ type Downloader struct {
|
|||
// The number of goroutines to spin up in parallel when sending parts.
|
||||
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
|
||||
//
|
||||
// Concurrency of 1 will download the parts sequentially.
|
||||
//
|
||||
// Concurrency is ignored if the Range input parameter is provided.
|
||||
Concurrency int
|
||||
|
||||
|
@ -135,6 +137,9 @@ type maxRetrier interface {
|
|||
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
|
||||
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
|
||||
//
|
||||
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
|
||||
// download the parts from S3 sequentially.
|
||||
//
|
||||
// If the GetObjectInput's Range value is provided that will cause the downloader
|
||||
// to perform a single GetObjectInput request for that object's range. This will
|
||||
// caused the part size, and concurrency configurations to be ignored.
|
||||
|
@ -147,7 +152,7 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
|
|||
//
|
||||
// DownloadWithContext is the same as Download with the additional support for
|
||||
// Context input parameters. The Context must not be nil. A nil Context will
|
||||
// cause a panic. Use the Context to add deadlining, timeouts, ect. The
|
||||
// cause a panic. Use the Context to add deadlining, timeouts, etc. The
|
||||
// DownloadWithContext may create sub-contexts for individual underlying
|
||||
// requests.
|
||||
//
|
||||
|
@ -160,6 +165,9 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
|
|||
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
|
||||
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
|
||||
//
|
||||
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
|
||||
// download the parts from S3 sequentially.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
//
|
||||
// If the GetObjectInput's Range value is provided that will cause the downloader
|
||||
|
@ -207,14 +215,14 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s
|
|||
//
|
||||
// objects := []s3manager.BatchDownloadObject {
|
||||
// {
|
||||
// Input: &s3.GetObjectInput {
|
||||
// Object: &s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("foo"),
|
||||
// },
|
||||
// Writer: fooFile,
|
||||
// },
|
||||
// {
|
||||
// Input: &s3.GetObjectInput {
|
||||
// Object: &s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("bar"),
|
||||
// },
|
||||
|
|
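The Downloader docs above note that a Concurrency of 1 downloads parts sequentially and that concurrency is ignored when a Range is supplied on the GetObjectInput. A minimal sketch of that configuration, with placeholder bucket and key names:

// Sketch only: the bucket and key are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Concurrency of 1 downloads the parts one after another; the setting is
	// ignored if the GetObjectInput carries a Range.
	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
		d.Concurrency = 1
	})

	buf := aws.NewWriteAtBuffer([]byte{})
	n, err := downloader.Download(buf, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}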
9
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
9
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
|
@ -241,6 +241,8 @@ type Uploader struct {
|
|||
// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
|
||||
// as 100, 50MB parts.
|
||||
// With a limit of s3.MaxUploadParts (10,000 parts).
|
||||
//
|
||||
// Defaults to package const's MaxUploadParts value.
|
||||
MaxUploadParts int
|
||||
|
||||
// The client to use when uploading to S3.
|
||||
|
@ -357,7 +359,7 @@ func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*Uploa
|
|||
//
|
||||
// UploadWithContext is the same as Upload with the additional support for
|
||||
// Context input parameters. The Context must not be nil. A nil Context will
|
||||
// cause a panic. Use the context to add deadlining, timeouts, ect. The
|
||||
// cause a panic. Use the context to add deadlining, timeouts, etc. The
|
||||
// UploadWithContext may create sub-contexts for individual underlying requests.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
|
@ -395,7 +397,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
|
|||
// },
|
||||
// }
|
||||
//
|
||||
// iter := &s3managee.UploadObjectsIterator{Objects: objects}
|
||||
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
|
||||
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
@ -477,6 +479,9 @@ func (u *uploader) init() {
|
|||
if u.cfg.PartSize == 0 {
|
||||
u.cfg.PartSize = DefaultUploadPartSize
|
||||
}
|
||||
if u.cfg.MaxUploadParts == 0 {
|
||||
u.cfg.MaxUploadParts = MaxUploadParts
|
||||
}
|
||||
|
||||
u.bufferPool = sync.Pool{
|
||||
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
|
||||
|
|
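The uploader init above falls back to DefaultUploadPartSize and MaxUploadParts when the corresponding fields are left at zero. A minimal sketch of tuning an Uploader with these fields, using a placeholder bucket, key and payload:

// Sketch only: bucket, key and payload are placeholders.
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 10 * 1024 * 1024 // 10 MiB parts
		u.MaxUploadParts = 0          // zero falls back to s3manager.MaxUploadParts (10,000)
	})

	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Body:   strings.NewReader("hello world"),
	})
	if err != nil {
		log.Fatal(err)
	}
}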
6
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
|
@ -29,9 +29,9 @@ var initRequest func(*request.Request)
|
|||
|
||||
// Service information constants
|
||||
const (
|
||||
ServiceName = "s3" // Service endpoint prefix API calls made to.
|
||||
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
|
||||
ServiceID = "S3" // ServiceID is a unique identifer of a specific service
|
||||
ServiceName = "s3" // Name of service.
|
||||
EndpointsID = ServiceName // ID to lookup a service endpoint with.
|
||||
ServiceID = "S3" // ServiceID is a unique identifer of a specific service.
|
||||
)
|
||||
|
||||
// New creates a new instance of the S3 client with a session.
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
|
@ -1908,7 +1908,7 @@ type Credentials struct {
|
|||
// The date on which the current credentials expire.
|
||||
//
|
||||
// Expiration is a required field
|
||||
Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
|
||||
Expiration *time.Time `type:"timestamp" required:"true"`
|
||||
|
||||
// The secret access key that can be used to sign requests.
|
||||
//
|
||||
|
|
6
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
6
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
|
@ -29,9 +29,9 @@ var initRequest func(*request.Request)
|
|||
|
||||
// Service information constants
|
||||
const (
|
||||
ServiceName = "sts" // Service endpoint prefix API calls made to.
|
||||
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
|
||||
ServiceID = "STS" // ServiceID is a unique identifer of a specific service
|
||||
ServiceName = "sts" // Name of service.
|
||||
EndpointsID = ServiceName // ID to lookup a service endpoint with.
|
||||
ServiceID = "STS" // ServiceID is a unique identifer of a specific service.
|
||||
)
|
||||
|
||||
// New creates a new instance of the STS client with a session.
|
||||
|
|