vendor: update all dependencies

Nick Craig-Wood 2019-11-11 15:04:53 +00:00
parent 3dcf1e61cf
commit 479c803fd9
446 changed files with 25054 additions and 8064 deletions

View file

@ -7043,7 +7043,7 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
}
type AbortMultipartUploadInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"`
// Name of the bucket to which the multipart upload was initiated.
//
@ -8084,7 +8084,7 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
}
type CompleteMultipartUploadInput struct {
_ struct{} `type:"structure" payload:"MultipartUpload"`
_ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -8404,7 +8404,7 @@ func (s *ContinuationEvent) UnmarshalEvent(
}
type CopyObjectInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@ -9025,7 +9025,7 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke
}
type CreateBucketInput struct {
_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
_ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
// The canned ACL to apply to the bucket.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
@ -9166,7 +9166,7 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
}
type CreateMultipartUploadInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@ -9708,7 +9708,7 @@ func (s *Delete) SetQuiet(v bool) *Delete {
}
type DeleteBucketAnalyticsConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"`
// The name of the bucket from which an analytics configuration is deleted.
//
@ -9784,7 +9784,7 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
}
type DeleteBucketCorsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -9844,7 +9844,7 @@ func (s DeleteBucketCorsOutput) GoString() string {
}
type DeleteBucketEncryptionInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"`
// The name of the bucket containing the server-side encryption configuration
// to delete.
@ -9907,7 +9907,7 @@ func (s DeleteBucketEncryptionOutput) GoString() string {
}
type DeleteBucketInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -9953,7 +9953,7 @@ func (s *DeleteBucketInput) getBucket() (v string) {
}
type DeleteBucketInventoryConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"`
// The name of the bucket containing the inventory configuration to delete.
//
@ -10029,7 +10029,7 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
}
type DeleteBucketLifecycleInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10089,7 +10089,7 @@ func (s DeleteBucketLifecycleOutput) GoString() string {
}
type DeleteBucketMetricsConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"`
// The name of the bucket containing the metrics configuration to delete.
//
@ -10179,7 +10179,7 @@ func (s DeleteBucketOutput) GoString() string {
}
type DeleteBucketPolicyInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10239,7 +10239,7 @@ func (s DeleteBucketPolicyOutput) GoString() string {
}
type DeleteBucketReplicationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"`
// The bucket name.
//
@ -10304,7 +10304,7 @@ func (s DeleteBucketReplicationOutput) GoString() string {
}
type DeleteBucketTaggingInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10364,7 +10364,7 @@ func (s DeleteBucketTaggingOutput) GoString() string {
}
type DeleteBucketWebsiteInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10510,7 +10510,7 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
}
type DeleteObjectInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10656,7 +10656,7 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
}
type DeleteObjectTaggingInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10749,7 +10749,7 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO
}
type DeleteObjectsInput struct {
_ struct{} `type:"structure" payload:"Delete"`
_ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -10885,7 +10885,7 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
}
type DeletePublicAccessBlockInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"`
// The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
//
@ -11341,7 +11341,7 @@ func (s *FilterRule) SetValue(v string) *FilterRule {
}
type GetBucketAccelerateConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"`
// Name of the bucket for which the accelerate configuration is retrieved.
//
@ -11412,7 +11412,7 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA
}
type GetBucketAclInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketAclRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -11489,7 +11489,7 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
}
type GetBucketAnalyticsConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"`
// The name of the bucket from which an analytics configuration is retrieved.
//
@ -11574,7 +11574,7 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana
}
type GetBucketCorsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketCorsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -11642,7 +11642,7 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
}
type GetBucketEncryptionInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"`
// The name of the bucket from which the server-side encryption configuration
// is retrieved.
@ -11714,7 +11714,7 @@ func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *Serv
}
type GetBucketInventoryConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"`
// The name of the bucket containing the inventory configuration to retrieve.
//
@ -11799,7 +11799,7 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv
}
type GetBucketLifecycleConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -11867,7 +11867,7 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge
}
type GetBucketLifecycleInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -11935,7 +11935,7 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput
}
type GetBucketLocationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketLocationRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12003,7 +12003,7 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca
}
type GetBucketLoggingInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12075,7 +12075,7 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket
}
type GetBucketMetricsConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"`
// The name of the bucket containing the metrics configuration to retrieve.
//
@ -12160,7 +12160,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics
}
type GetBucketNotificationConfigurationRequest struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"`
// Name of the bucket to get the notification configuration for.
//
@ -12208,7 +12208,7 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
}
type GetBucketPolicyInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12277,7 +12277,7 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
}
type GetBucketPolicyStatusInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"`
// The name of the Amazon S3 bucket whose policy status you want to retrieve.
//
@ -12348,7 +12348,7 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke
}
type GetBucketReplicationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12418,7 +12418,7 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC
}
type GetBucketRequestPaymentInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12487,7 +12487,7 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym
}
type GetBucketTaggingInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12556,7 +12556,7 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
}
type GetBucketVersioningInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12636,7 +12636,7 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp
}
type GetBucketWebsiteInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12730,7 +12730,7 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb
}
type GetObjectAclInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectAclRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -12853,7 +12853,7 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
}
type GetObjectInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -13090,7 +13090,7 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
}
type GetObjectLegalHoldInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"`
// The bucket containing the object whose Legal Hold status you want to retrieve.
//
@ -13199,7 +13199,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
}
type GetObjectLockConfigurationInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"`
// The bucket whose object lock configuration you want to retrieve.
//
@ -13581,7 +13581,7 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput
}
type GetObjectRetentionInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"`
// The bucket containing the object whose retention settings you want to retrieve.
//
@ -13690,7 +13690,7 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje
}
type GetObjectTaggingInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -13790,7 +13790,7 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput
}
type GetObjectTorrentInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -13895,7 +13895,7 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu
}
type GetPublicAccessBlockInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"`
// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
// want to retrieve.
@ -14126,7 +14126,7 @@ func (s *Grantee) SetURI(v string) *Grantee {
}
type HeadBucketInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"HeadBucketRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -14186,7 +14186,7 @@ func (s HeadBucketOutput) GoString() string {
}
type HeadObjectInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"HeadObjectRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -15661,7 +15661,7 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
}
type ListBucketAnalyticsConfigurationsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"`
// The name of the bucket from which analytics configurations are retrieved.
//
@ -15773,7 +15773,7 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str
}
type ListBucketInventoryConfigurationsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"`
// The name of the bucket containing the inventory configurations to retrieve.
//
@ -15887,7 +15887,7 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str
}
type ListBucketMetricsConfigurationsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"`
// The name of the bucket containing the metrics configurations to retrieve.
//
@ -16047,7 +16047,7 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
}
type ListMultipartUploadsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -16291,7 +16291,7 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti
}
type ListObjectVersionsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -16524,7 +16524,7 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe
}
type ListObjectsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListObjectsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -16736,7 +16736,7 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
}
type ListObjectsV2Input struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
// Name of the bucket to list.
//
@ -16997,7 +16997,7 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
}
type ListPartsInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"ListPartsRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -18622,7 +18622,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi
}
type PutBucketAccelerateConfigurationInput struct {
_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
_ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"`
// Specifies the Accelerate Configuration you want to set for the bucket.
//
@ -18698,7 +18698,7 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string {
}
type PutBucketAclInput struct {
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
_ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"`
// The canned ACL to apply to the bucket.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
@ -18827,7 +18827,7 @@ func (s PutBucketAclOutput) GoString() string {
}
type PutBucketAnalyticsConfigurationInput struct {
_ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
_ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"`
// The configuration and any analyses for the analytics filter.
//
@ -18922,7 +18922,7 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
}
type PutBucketCorsInput struct {
_ struct{} `type:"structure" payload:"CORSConfiguration"`
_ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19004,7 +19004,7 @@ func (s PutBucketCorsOutput) GoString() string {
}
type PutBucketEncryptionInput struct {
_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
_ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"`
// Specifies default encryption for a bucket using server-side encryption with
// Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
@ -19089,7 +19089,7 @@ func (s PutBucketEncryptionOutput) GoString() string {
}
type PutBucketInventoryConfigurationInput struct {
_ struct{} `type:"structure" payload:"InventoryConfiguration"`
_ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"`
// The name of the bucket where the inventory configuration will be stored.
//
@ -19184,7 +19184,7 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string {
}
type PutBucketLifecycleConfigurationInput struct {
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
_ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19260,7 +19260,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string {
}
type PutBucketLifecycleInput struct {
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
_ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19333,7 +19333,7 @@ func (s PutBucketLifecycleOutput) GoString() string {
}
type PutBucketLoggingInput struct {
_ struct{} `type:"structure" payload:"BucketLoggingStatus"`
_ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19410,7 +19410,7 @@ func (s PutBucketLoggingOutput) GoString() string {
}
type PutBucketMetricsConfigurationInput struct {
_ struct{} `type:"structure" payload:"MetricsConfiguration"`
_ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"`
// The name of the bucket for which the metrics configuration is set.
//
@ -19505,7 +19505,7 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string {
}
type PutBucketNotificationConfigurationInput struct {
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
_ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19585,7 +19585,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
}
type PutBucketNotificationInput struct {
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
_ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19657,7 +19657,7 @@ func (s PutBucketNotificationOutput) GoString() string {
}
type PutBucketPolicyInput struct {
_ struct{} `type:"structure" payload:"Policy"`
_ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19741,7 +19741,7 @@ func (s PutBucketPolicyOutput) GoString() string {
}
type PutBucketReplicationInput struct {
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
_ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19830,7 +19830,7 @@ func (s PutBucketReplicationOutput) GoString() string {
}
type PutBucketRequestPaymentInput struct {
_ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
_ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19907,7 +19907,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
}
type PutBucketTaggingInput struct {
_ struct{} `type:"structure" payload:"Tagging"`
_ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -19984,7 +19984,7 @@ func (s PutBucketTaggingOutput) GoString() string {
}
type PutBucketVersioningInput struct {
_ struct{} `type:"structure" payload:"VersioningConfiguration"`
_ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -20070,7 +20070,7 @@ func (s PutBucketVersioningOutput) GoString() string {
}
type PutBucketWebsiteInput struct {
_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
_ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -20149,7 +20149,7 @@ func (s PutBucketWebsiteOutput) GoString() string {
}
type PutObjectAclInput struct {
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
_ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@ -20324,7 +20324,7 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
}
type PutObjectInput struct {
_ struct{} `type:"structure" payload:"Body"`
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
@ -20671,7 +20671,7 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
}
type PutObjectLegalHoldInput struct {
_ struct{} `type:"structure" payload:"LegalHold"`
_ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"`
// The bucket containing the object that you want to place a Legal Hold on.
//
@ -20791,7 +20791,7 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
}
type PutObjectLockConfigurationInput struct {
_ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
_ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"`
// The bucket whose object lock configuration you want to create or replace.
//
@ -20998,7 +20998,7 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
}
type PutObjectRetentionInput struct {
_ struct{} `type:"structure" payload:"Retention"`
_ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`
// The bucket that contains the object you want to apply this Object Retention
// configuration to.
@ -21129,7 +21129,7 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti
}
type PutObjectTaggingInput struct {
_ struct{} `type:"structure" payload:"Tagging"`
_ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -21237,7 +21237,7 @@ func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput
}
type PutPublicAccessBlockInput struct {
_ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"`
_ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
// The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
// want to set.
@ -21999,7 +21999,7 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
}
type RestoreObjectInput struct {
_ struct{} `type:"structure" payload:"RestoreRequest"`
_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -22464,6 +22464,41 @@ func (s SSES3) GoString() string {
return s.String()
}
type ScanRange struct {
_ struct{} `type:"structure"`
// Specifies the end of the byte range. This parameter is optional. Valid values:
// non-negative integers. The default value is one less than the size of the
// object being queried.
End *int64 `type:"long"`
// Specifies the start of the byte range. This parameter is optional. Valid
// values: non-negative integers. The default value is 0.
Start *int64 `type:"long"`
}
// String returns the string representation
func (s ScanRange) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ScanRange) GoString() string {
return s.String()
}
// SetEnd sets the End field's value.
func (s *ScanRange) SetEnd(v int64) *ScanRange {
s.End = &v
return s
}
// SetStart sets the Start field's value.
func (s *ScanRange) SetStart(v int64) *ScanRange {
s.Start = &v
return s
}
// SelectObjectContentEventStream provides handling of EventStreams for
// the SelectObjectContent API.
//
@ -22503,6 +22538,8 @@ type SelectObjectContentEventStream struct {
// may result in resource leaks.
func (es *SelectObjectContentEventStream) Close() (err error) {
es.Reader.Close()
es.StreamCloser.Close()
return es.Err()
}
@ -22512,8 +22549,6 @@ func (es *SelectObjectContentEventStream) Err() error {
if err := es.Reader.Err(); err != nil {
return err
}
es.StreamCloser.Close()
return nil
}
@ -22738,6 +22773,12 @@ type SelectObjectContentInput struct {
// The SSE Customer Key MD5. For more information, see Server-Side Encryption
// (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the byte range of the object to get the records from. A record
// is processed when its first byte is contained by the range. This parameter
// is optional, but when specified, it must not be empty. See RFC 2616, Section
// 14.35.1 about how to specify the start and end of the range.
ScanRange *ScanRange `type:"structure"`
}
// String returns the string representation
@ -22858,6 +22899,12 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC
return s
}
// SetScanRange sets the ScanRange field's value.
func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput {
s.ScanRange = v
return s
}
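
For context, a minimal sketch (not part of this diff) of how the new ScanRange field can be supplied to SelectObjectContent; the session setup, bucket name, key, and SQL expression below are placeholder assumptions.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// "example-bucket" and "data.csv" are placeholders.
	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:              aws.String("example-bucket"),
		Key:                 aws.String("data.csv"),
		Expression:          aws.String("SELECT * FROM S3Object"),
		ExpressionType:      aws.String(s3.ExpressionTypeSql),
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		// Only records whose first byte falls within the first MiB are processed.
		ScanRange: &s3.ScanRange{
			Start: aws.Int64(0),
			End:   aws.Int64(1024*1024 - 1),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			log.Printf("%s", records.Payload)
		}
	}
}
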
type SelectObjectContentOutput struct {
_ struct{} `type:"structure" payload:"Payload"`
@ -23715,7 +23762,7 @@ func (s *Transition) SetStorageClass(v string) *Transition {
}
type UploadPartCopyInput struct {
_ struct{} `type:"structure"`
_ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -24045,7 +24092,7 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy
}
type UploadPartInput struct {
_ struct{} `type:"structure" payload:"Body"`
_ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"`
// Object data.
Body io.ReadSeeker `type:"blob"`
@ -24657,6 +24704,9 @@ const (
// InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value
InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus"
// InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier"
)
const (

View file

@ -0,0 +1,81 @@
package s3manager
import (
"io"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// BufferedReadSeeker is a buffered io.ReadSeeker
type BufferedReadSeeker struct {
r io.ReadSeeker
buffer []byte
readIdx, writeIdx int
}
// NewBufferedReadSeeker returns a new BufferedReadSeeker.
// If len(b) == 0, the buffer will be initialized to 64 KiB.
func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
if len(b) == 0 {
b = make([]byte, 64*1024)
}
return &BufferedReadSeeker{r: r, buffer: b}
}
func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
b.r = r
b.readIdx, b.writeIdx = 0, 0
}
// Read will read up to len(p) bytes into p and will return
// the number of bytes read and any error that occurred.
// If len(p) is greater than the buffer size, a single read request
// will be issued to the underlying io.ReadSeeker for len(p) bytes.
// A Read request will at most perform a single Read of the underlying
// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return n, err
}
if b.readIdx == b.writeIdx {
if len(p) >= len(b.buffer) {
n, err = b.r.Read(p)
return n, err
}
b.readIdx, b.writeIdx = 0, 0
n, err = b.r.Read(b.buffer)
if n == 0 {
return n, err
}
b.writeIdx += n
}
n = copy(p, b.buffer[b.readIdx:b.writeIdx])
b.readIdx += n
return n, err
}
// Seek will position the underlying io.ReadSeeker to the given offset
// and will clear the buffer.
func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
n, err := b.r.Seek(offset, whence)
b.reset(b.r)
return n, err
}
// ReadAt will read up to len(p) bytes at the given file offset.
// This will result in the buffer being cleared.
func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
_, err := b.Seek(off, sdkio.SeekStart)
if err != nil {
return 0, err
}
return b.Read(p)
}
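
For illustration, a small sketch (not part of this diff) of the exported BufferedReadSeeker used on its own; strings.NewReader stands in for any io.ReadSeeker.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	src := strings.NewReader("hello, buffered world")

	// Passing nil lets the type allocate its default 64 KiB buffer.
	brs := s3manager.NewBufferedReadSeeker(src, nil)

	p := make([]byte, 5)
	n, err := brs.Read(p)
	fmt.Println(n, err, string(p[:n])) // 5 <nil> hello

	// Seeking discards the buffered bytes and repositions the source.
	if _, err := brs.Seek(7, io.SeekStart); err == nil {
		n, _ = brs.Read(p)
		fmt.Println(string(p[:n])) // buffe
	}
}
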

View file

@ -0,0 +1,7 @@
// +build !windows
package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return nil
}

View file

@ -0,0 +1,5 @@
package s3manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return NewBufferedReadSeekerWriteToPool(1024 * 1024)
}

View file

@ -0,0 +1,7 @@
// +build !windows
package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return nil
}

View file

@ -0,0 +1,5 @@
package s3manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
}

View file

@ -25,13 +25,25 @@ const DefaultDownloadPartSize = 1024 * 1024 * 5
// when using Download().
const DefaultDownloadConcurrency = 5
type errReadingBody struct {
err error
}
func (e *errReadingBody) Error() string {
return fmt.Sprintf("failed to read part body: %v", e.err)
}
func (e *errReadingBody) Unwrap() error {
return e.err
}
// The Downloader structure that calls Download(). It is safe to call Download()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Downloader's properties is not safe to be done concurrently.
type Downloader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultDownloadPartSize value will be used.
// The size (in bytes) to request from S3 for each part.
// The minimum allowed part size is 5MB, and if this value is set to zero,
// the DefaultDownloadPartSize value will be used.
//
// PartSize is ignored if the Range input parameter is provided.
PartSize int64
@ -50,6 +62,14 @@ type Downloader struct {
// List of request options that will be passed down to individual API
// operation requests made by the downloader.
RequestOptions []request.Option
// Defines the buffer strategy used when downloading a part.
//
// If a WriterReadFromProvider is given, the download manager
// will pass the io.WriterAt of the Download request to the provider
// and will use the returned WriterReadFrom as the destination writer
// when copying from the HTTP response body.
BufferProvider WriterReadFromProvider
}
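
As a rough usage sketch (not part of this diff), the new BufferProvider field could be set like this when constructing a Downloader; the bucket, key, and destination file below are placeholders, and the 1 MiB pool size mirrors what the Windows default provider uses.

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Use pooled 1 MiB bufio.Writer buffers between the HTTP response body
	// and the destination io.WriterAt.
	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
		d.BufferProvider = s3manager.NewPooledBufferedWriterReadFromProvider(1024 * 1024)
	})

	f, err := os.Create("object.bin") // placeholder destination
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	n, err := downloader.Download(f, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
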
// WithDownloaderRequestOptions appends to the Downloader's API request options.
@ -77,10 +97,15 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
return newDownloader(s3.New(c), options...)
}
func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
d := &Downloader{
S3: s3.New(c),
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
S3: client,
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
BufferProvider: defaultDownloadBufferProvider(),
}
for _, option := range options {
option(d)
@ -109,16 +134,7 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
d := &Downloader{
S3: svc,
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
}
for _, option := range options {
option(d)
}
return d
return newDownloader(svc, options...)
}
type maxRetrier interface {
@ -405,18 +421,20 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
var resp *s3.GetObjectOutput
resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
if err != nil {
return err
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err = io.Copy(&chunk, resp.Body)
resp.Body.Close()
n, err = d.tryDownloadChunk(in, &chunk)
if err == nil {
break
}
// Check if the returned error is an errReadingBody.
// An errReadingBody indicates that an error occurred while copying
// the HTTP response body. If so, unwrap it to get the underlying
// error and attempt any remaining retries.
if bodyErr, ok := err.(*errReadingBody); ok {
err = bodyErr.Unwrap()
} else {
return err
}
chunk.cur = 0
logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
@ -429,6 +447,28 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
return err
}
func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
cleanup := func() {}
if d.cfg.BufferProvider != nil {
w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
}
defer cleanup()
resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
if err != nil {
return 0, err
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err := io.Copy(w, resp.Body)
resp.Body.Close()
if err != nil {
return n, &errReadingBody{err: err}
}
return n, nil
}
func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
s, ok := svc.(*s3.S3)
if !ok {

View file

@ -0,0 +1,65 @@
package s3manager
import (
"io"
"sync"
)
// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
type ReadSeekerWriteTo interface {
io.ReadSeeker
io.WriterTo
}
// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
// implementation.
type BufferedReadSeekerWriteTo struct {
*BufferedReadSeeker
}
// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
// an error occurs. Returns the number of bytes written and any error encountered during the write.
func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
return io.Copy(writer, b.BufferedReadSeeker)
}
// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
type ReadSeekerWriteToProvider interface {
GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
}
// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
// []byte slices for buffering parts in memory
type BufferedReadSeekerWriteToPool struct {
pool sync.Pool
}
// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
// a pool of reusable buffers. If size is less than 64 KiB, the buffer
// will default to 64 KiB, because io.Copy from writers or readers that don't support io.ReaderFrom
// or io.WriterTo respectively will default to copying 32 KiB.
func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
if size < 65536 {
size = 65536
}
return &BufferedReadSeekerWriteToPool{
pool: sync.Pool{New: func() interface{} {
return make([]byte, size)
}},
}
}
// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
// The provided cleanup must be called after operations have been completed on the
// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
buffer := p.pool.Get().([]byte)
r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
cleanup = func() {
p.pool.Put(buffer)
}
return r, cleanup
}
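
A minimal sketch (not part of this diff) of the GetWriteTo / cleanup contract; strings.NewReader and os.Stdout stand in for an upload part reader and its consumer.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	pool := s3manager.NewBufferedReadSeekerWriteToPool(0) // 0 falls back to 64 KiB

	part := strings.NewReader("one part of an upload") // stands in for a section reader

	w, cleanup := pool.GetWriteTo(part)
	// cleanup must run after WriteTo has finished so the []byte
	// returns to the sync.Pool for the next part.
	defer cleanup()

	if _, err := w.WriteTo(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
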

View file

@ -162,6 +162,12 @@ type Uploader struct {
// List of request options that will be passed down to individual API
// operation requests made by the uploader.
RequestOptions []request.Option
// Defines the buffer strategy used when uploading a part
BufferProvider ReadSeekerWriteToProvider
// partPool allows for the reuse of streaming payload part buffers between upload calls
partPool *partPool
}
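
A rough sketch (not part of this diff) of opting an Uploader into the new BufferProvider; the bucket, key, and body are placeholders, and the 1 MiB pool size mirrors the Windows default provider.

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())

	// Opt in to the pooled WriteTo buffering strategy with 1 MiB buffers.
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(1024 * 1024)
	})

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Body:   strings.NewReader("hello from s3manager"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded to", out.Location)
}
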
// NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@ -181,18 +187,25 @@ type Uploader struct {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
return newUploader(s3.New(c), options...)
}
func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: s3.New(c),
S3: client,
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
BufferProvider: defaultUploadBufferProvider(),
}
for _, option := range options {
option(u)
}
u.partPool = newPartPool(u.PartSize)
return u
}
@ -215,19 +228,7 @@ func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: svc,
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
}
for _, option := range options {
option(u)
}
return u
return newUploader(svc, options...)
}
// Upload uploads an object to S3, intelligently buffering large files into
@ -287,6 +288,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
for _, opt := range opts {
opt(&i.cfg)
}
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
return i.upload()
@ -356,8 +358,6 @@ type uploader struct {
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
bufferPool sync.Pool
}
// internal logic for deciding whether to upload a single part or use a
@ -373,15 +373,16 @@ func (u *uploader) upload() (*UploadOutput, error) {
}
// Do one read to determine if we have more than one part
reader, _, part, err := u.nextReader()
reader, _, cleanup, err := u.nextReader()
if err == io.EOF { // single part
return u.singlePart(reader)
return u.singlePart(reader, cleanup)
} else if err != nil {
cleanup()
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
}
mu := multiuploader{uploader: u}
return mu.upload(reader, part)
return mu.upload(reader, cleanup)
}
// init will initialize all default options.
@ -396,8 +397,10 @@ func (u *uploader) init() error {
u.cfg.MaxUploadParts = MaxUploadParts
}
u.bufferPool = sync.Pool{
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
// If PartSize was changed or partPool was never set up, we need to allocate a new pool
// so that we return []byte slices of the correct size
if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
u.cfg.partPool = newPartPool(u.cfg.PartSize)
}
// Try to get the total size for some optimizations
@ -433,7 +436,7 @@ func (u *uploader) initSize() error {
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
type readerAtSeeker interface {
io.ReaderAt
io.ReadSeeker
@ -452,17 +455,32 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
}
}
reader := io.NewSectionReader(r, u.readerPos, n)
var (
reader io.ReadSeeker
cleanup func()
)
reader = io.NewSectionReader(r, u.readerPos, n)
if u.cfg.BufferProvider != nil {
reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
} else {
cleanup = func() {}
}
u.readerPos += n
return reader, int(n), nil, err
return reader, int(n), cleanup, err
default:
part := u.bufferPool.Get().([]byte)
part := u.cfg.partPool.Get().([]byte)
n, err := readFillBuf(r, part)
u.readerPos += int64(n)
return bytes.NewReader(part[0:n]), n, part, err
cleanup := func() {
u.cfg.partPool.Put(part)
}
return bytes.NewReader(part[0:n]), n, cleanup, err
}
}
@ -479,10 +497,12 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
defer cleanup()
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
params.Body = buf
params.Body = r
// Need to use request form because URL generated in request is
// used in return.
@ -512,9 +532,9 @@ type multiuploader struct {
// keeps track of a single chunk of data being sent to S3.
type chunk struct {
buf io.ReadSeeker
part []byte
num int64
buf io.ReadSeeker
num int64
cleanup func()
}
// completedParts is a wrapper to make parts sortable by their part number,
@ -527,7 +547,7 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
@ -547,46 +567,29 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
// Send part 1 to the workers
var num int64 = 1
ch <- chunk{buf: firstBuf, part: firstPart, num: num}
ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
// Read and queue the rest of the parts
for u.geterr() == nil && err == nil {
var reader io.ReadSeeker
var nextChunkLen int
var part []byte
reader, nextChunkLen, part, err = u.nextReader()
var (
reader io.ReadSeeker
nextChunkLen int
ok bool
)
if err != nil && err != io.EOF {
u.seterr(awserr.New(
"ReadRequestBody",
"read multipart upload data failed",
err))
break
}
if nextChunkLen == 0 {
// No need to upload an empty part; if the file was empty to start
// with, an empty single part would have been created and a
// multipart upload never started.
reader, nextChunkLen, cleanup, err = u.nextReader()
ok, err = u.shouldContinue(num, nextChunkLen, err)
if !ok {
cleanup()
if err != nil {
u.seterr(err)
}
break
}
num++
// This upload exceeded maximum number of supported parts, error now.
if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
var msg string
if num > int64(u.cfg.MaxUploadParts) {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.cfg.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
break
}
ch <- chunk{buf: reader, part: part, num: num}
ch <- chunk{buf: reader, num: num, cleanup: cleanup}
}
// Close the channel, wait for workers, and complete upload
@ -620,6 +623,35 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*Uploa
}, nil
}
func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
if err != nil && err != io.EOF {
return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
}
if nextChunkLen == 0 {
// No need to upload an empty part; if the file was empty to start
// with, an empty single part would have been created and a
// multipart upload never started.
return false, nil
}
part++
// This upload exceeded maximum number of supported parts, error now.
if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
var msg string
if part > int64(u.cfg.MaxUploadParts) {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.cfg.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
return false, awserr.New("TotalPartsExceeded", msg, nil)
}
return true, err
}
// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
@ -651,9 +683,9 @@ func (u *multiuploader) send(c chunk) error {
SSECustomerKey: u.in.SSECustomerKey,
PartNumber: &c.num,
}
resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
// put the byte array back into the pool to conserve memory
u.bufferPool.Put(c.part)
c.cleanup()
if err != nil {
return err
}
@ -725,3 +757,18 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
return resp
}
type partPool struct {
partSize int64
sync.Pool
}
func newPartPool(partSize int64) *partPool {
p := &partPool{partSize: partSize}
p.New = func() interface{} {
return make([]byte, p.partSize)
}
return p
}

View file

@ -12,7 +12,7 @@ import (
// package's PutObjectInput with the exception that the Body member is an
// io.Reader instead of an io.ReadSeeker.
type UploadInput struct {
_ struct{} `type:"structure" payload:"Body"`
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

View file

@ -0,0 +1,75 @@
package s3manager
import (
"bufio"
"io"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
type WriterReadFrom interface {
io.Writer
io.ReaderFrom
}
// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer
type WriterReadFromProvider interface {
GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
}
type bufferedWriter interface {
WriterReadFrom
Flush() error
Reset(io.Writer)
}
type bufferedReadFrom struct {
bufferedWriter
}
func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
n, err := b.bufferedWriter.ReadFrom(r)
if flushErr := b.Flush(); flushErr != nil && err == nil {
err = flushErr
}
return n, err
}
// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
// to manage allocation and reuse of *bufio.Writer structures.
type PooledBufferedReadFromProvider struct {
pool sync.Pool
}
// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider.
// Size is used to control the size of the underlying *bufio.Writer created for
// calls to GetReadFrom.
func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
if size < int(32*sdkio.KibiByte) {
size = int(64 * sdkio.KibiByte)
}
return &PooledBufferedReadFromProvider{
pool: sync.Pool{
New: func() interface{} {
return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
},
},
}
}
// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
// interface. Additionally, a cleanup function is provided which must be called after usage of the WriterReadFrom
// has been completed in order to allow the reuse of the *bufio.Writer.
func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
buffer := p.pool.Get().(*bufferedReadFrom)
buffer.Reset(writer)
r = buffer
cleanup = func() {
buffer.Reset(nil) // Reset to nil writer to release reference
p.pool.Put(buffer)
}
return r, cleanup
}
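
A minimal sketch (not part of this diff) of the GetReadFrom / cleanup contract outside the downloader; bytes.Buffer and strings.NewReader stand in for the chunk writer and the HTTP response body.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	provider := s3manager.NewPooledBufferedWriterReadFromProvider(0) // 0 falls back to 64 KiB

	var dst bytes.Buffer // stands in for the download's destination writer
	w, cleanup := provider.GetReadFrom(&dst)
	defer cleanup() // return the *bufio.Writer to the pool when done

	body := strings.NewReader("response body bytes") // stands in for the HTTP response body
	if _, err := w.ReadFrom(body); err != nil {
		fmt.Println(err)
	}

	fmt.Println(dst.String())
}
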

View file

@ -46,11 +46,11 @@ const (
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2006-03-01",
},

View file

@ -676,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
//
// Returned Error Codes:
// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
// The error returned if the message passed to DecodeAuthorizationMessage was
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
// This error is returned if the message passed to DecodeAuthorizationMessage
// was invalid. This can happen if the token contains invalid characters, such
// as linebreaks.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {

View file

@ -34,9 +34,9 @@ const (
// ErrCodeInvalidAuthorizationMessageException for service response error code
// "InvalidAuthorizationMessageException".
//
// The error returned if the message passed to DecodeAuthorizationMessage was
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
// This error is returned if the message passed to DecodeAuthorizationMessage
// was invalid. This can happen if the token contains invalid characters, such
// as linebreaks.
ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
// ErrCodeInvalidIdentityTokenException for service response error code

View file

@ -46,11 +46,11 @@ const (
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(EndpointsID, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
svc := &STS{
Client: client.New(
cfg,
@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},