s3: support metadata setting and mapping on server side Copy
Before this change, the backend would not run the metadata mapper, and it would ignore any metadata set when doing server-side copies.
This commit is contained in:
parent
9f2ce2c7fc
commit
bda4f25baa
3 changed files with 59 additions and 4 deletions
|
@ -98,4 +98,5 @@ import "github.com/aws/aws-sdk-go/service/s3"
|
||||||
genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
|
genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
|
||||||
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
|
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
|
||||||
genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
|
genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
|
||||||
|
genSetFrom(new(s3.CopyObjectInput), new(s3.PutObjectInput))
|
||||||
}
|
}
|
||||||
|
|
|
@ -4591,10 +4591,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
|
|
||||||
srcBucket, srcPath := srcObj.split()
|
srcBucket, srcPath := srcObj.split()
|
||||||
req := s3.CopyObjectInput{
|
req := s3.CopyObjectInput{
|
||||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update the metadata if it is in use
|
||||||
|
if ci := fs.GetConfig(ctx); ci.Metadata {
|
||||||
|
ui, err := srcObj.prepareUpload(ctx, src, fs.MetadataAsOpenOptions(ctx), true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||||
|
}
|
||||||
|
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
|
||||||
|
req.MetadataDirective = aws.String(s3.MetadataDirectiveReplace)
|
||||||
|
}
|
||||||
|
|
||||||
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj)
|
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -5697,7 +5709,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
}
|
}
|
||||||
ui, err := o.prepareUpload(ctx, src, options)
|
ui, err := o.prepareUpload(ctx, src, options, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -6064,7 +6076,9 @@ type uploadInfo struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prepare object for being uploaded
|
// Prepare object for being uploaded
|
||||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
//
|
||||||
|
// If noHash is true the md5sum will not be calculated
|
||||||
|
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, noHash bool) (ui uploadInfo, err error) {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
// Create parent dir/bucket if not saving directory marker
|
// Create parent dir/bucket if not saving directory marker
|
||||||
if !strings.HasSuffix(o.remote, "/") {
|
if !strings.HasSuffix(o.remote, "/") {
|
||||||
|
@ -6138,7 +6152,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||||
var md5sumBase64 string
|
var md5sumBase64 string
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||||
if !multipart || !o.fs.opt.DisableChecksum {
|
if !noHash && (!multipart || !o.fs.opt.DisableChecksum) {
|
||||||
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||||
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
||||||
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
||||||
|
@ -6250,7 +6264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||||
if multipart {
|
if multipart {
|
||||||
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in, options...)
|
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in, options...)
|
||||||
} else {
|
} else {
|
||||||
ui, err = o.prepareUpload(ctx, src, options)
|
ui, err = o.prepareUpload(ctx, src, options, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,6 +11,7 @@ func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *
|
||||||
a.EncodingType = b.EncodingType
|
a.EncodingType = b.EncodingType
|
||||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||||
a.MaxKeys = b.MaxKeys
|
a.MaxKeys = b.MaxKeys
|
||||||
|
a.OptionalObjectAttributes = b.OptionalObjectAttributes
|
||||||
a.Prefix = b.Prefix
|
a.Prefix = b.Prefix
|
||||||
a.RequestPayer = b.RequestPayer
|
a.RequestPayer = b.RequestPayer
|
||||||
}
|
}
|
||||||
|
@ -25,6 +26,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
|
||||||
a.MaxKeys = b.MaxKeys
|
a.MaxKeys = b.MaxKeys
|
||||||
a.Name = b.Name
|
a.Name = b.Name
|
||||||
a.Prefix = b.Prefix
|
a.Prefix = b.Prefix
|
||||||
|
a.RequestCharged = b.RequestCharged
|
||||||
}
|
}
|
||||||
|
|
||||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||||
|
@ -34,7 +36,9 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
|
||||||
a.EncodingType = b.EncodingType
|
a.EncodingType = b.EncodingType
|
||||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||||
a.MaxKeys = b.MaxKeys
|
a.MaxKeys = b.MaxKeys
|
||||||
|
a.OptionalObjectAttributes = b.OptionalObjectAttributes
|
||||||
a.Prefix = b.Prefix
|
a.Prefix = b.Prefix
|
||||||
|
a.RequestPayer = b.RequestPayer
|
||||||
}
|
}
|
||||||
|
|
||||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||||
|
@ -55,6 +59,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
|
||||||
a.MaxKeys = b.MaxKeys
|
a.MaxKeys = b.MaxKeys
|
||||||
a.Name = b.Name
|
a.Name = b.Name
|
||||||
a.Prefix = b.Prefix
|
a.Prefix = b.Prefix
|
||||||
|
a.RequestCharged = b.RequestCharged
|
||||||
}
|
}
|
||||||
|
|
||||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||||
|
@ -64,6 +69,7 @@ func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||||
a.Key = b.Key
|
a.Key = b.Key
|
||||||
a.LastModified = b.LastModified
|
a.LastModified = b.LastModified
|
||||||
a.Owner = b.Owner
|
a.Owner = b.Owner
|
||||||
|
a.RestoreStatus = b.RestoreStatus
|
||||||
a.Size = b.Size
|
a.Size = b.Size
|
||||||
a.StorageClass = b.StorageClass
|
a.StorageClass = b.StorageClass
|
||||||
}
|
}
|
||||||
|
@ -237,3 +243,37 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
|
||||||
a.StorageClass = b.StorageClass
|
a.StorageClass = b.StorageClass
|
||||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
|
||||||
|
func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
|
||||||
|
a.ACL = b.ACL
|
||||||
|
a.Bucket = b.Bucket
|
||||||
|
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||||
|
a.CacheControl = b.CacheControl
|
||||||
|
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||||
|
a.ContentDisposition = b.ContentDisposition
|
||||||
|
a.ContentEncoding = b.ContentEncoding
|
||||||
|
a.ContentLanguage = b.ContentLanguage
|
||||||
|
a.ContentType = b.ContentType
|
||||||
|
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||||
|
a.Expires = b.Expires
|
||||||
|
a.GrantFullControl = b.GrantFullControl
|
||||||
|
a.GrantRead = b.GrantRead
|
||||||
|
a.GrantReadACP = b.GrantReadACP
|
||||||
|
a.GrantWriteACP = b.GrantWriteACP
|
||||||
|
a.Key = b.Key
|
||||||
|
a.Metadata = b.Metadata
|
||||||
|
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||||
|
a.ObjectLockMode = b.ObjectLockMode
|
||||||
|
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||||
|
a.RequestPayer = b.RequestPayer
|
||||||
|
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||||
|
a.SSECustomerKey = b.SSECustomerKey
|
||||||
|
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||||
|
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
|
||||||
|
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||||
|
a.ServerSideEncryption = b.ServerSideEncryption
|
||||||
|
a.StorageClass = b.StorageClass
|
||||||
|
a.Tagging = b.Tagging
|
||||||
|
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||||
|
}
|
||||||
|
|
Loading…
Reference in a new issue