Add v2 signature signing to S3 storage driver (#1800)

* Add v2 signature signing to S3 storage driver

Closes #1796
Closes #1606

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* use Logrus for debug logging

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* use 'date' instead of 'x-amz-date' in request header

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>

* only allow v4 signature signing against AWS S3

Signed-off-by: Fabio Berchtold <fabio.berchtold@swisscom.com>
Fabio Berchtold 2016-09-01 22:52:40 +02:00 committed by Richard Scothern
parent 49da29ee46
commit 7dcac52f18
4 changed files with 299 additions and 19 deletions


@@ -122,8 +122,11 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Amaz
   </td>
   <td>
     Indicates whether the registry uses Version 4 of AWS's authentication.
-    Generally, you should set this to <code>true</code>. By default, this is
-    <code>false</code>.
+    Generally, you should set this to <code>true</code> unless you are using an
+    S3 compatible provider that does not support v4 signature signing.
+    If you set this to <code>false</code> then the storage driver will use v2 signature signing.
+    By default, this is <code>true</code>.
+    You can not use v2 signing if you are using AWS S3.
   </td>
 </tr>
 <tr>
@@ -228,13 +231,13 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Amaz
 `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns.

-`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false)
+`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to true if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false)

 `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes.

 `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).

-`storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are STANDARD and REDUCED_REDUNDANCY.
+`storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are NONE, STANDARD and REDUCED_REDUNDANCY. Use NONE if your S3 compatible provider does not support storage classes.

 `objectacl`: (optional) The canned object ACL to be applied to each registry object. Defaults to `private`. If you are using a bucket owned by another AWS account, it is recommended that you set this to `bucket-owner-full-control` so that the bucket owner can access your objects. Other valid options are available in the [AWS S3 documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl).
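
To make the documented defaults concrete, here is a minimal, hypothetical sketch of how these parameters could be passed to the driver's `FromParameters` constructor for an S3 compatible provider. The endpoint, bucket, and credentials are placeholders, and the import path assumes the docker/distribution source tree at this commit; only `v4auth` and `storageclass` relate to this change.

package main

import (
    "fmt"

    s3driver "github.com/docker/distribution/registry/storage/driver/s3-aws"
)

func main() {
    // Placeholder values for illustration only.
    params := map[string]interface{}{
        "accesskey":      "AKIAEXAMPLE",
        "secretkey":      "secret",
        "region":         "us-east-1",
        "regionendpoint": "https://objects.example.com", // non-AWS, S3 compatible endpoint
        "bucket":         "registry",
        "v4auth":         false,  // fall back to v2 signature signing
        "storageclass":   "NONE", // provider has no storage class support
    }

    d, err := s3driver.FromParameters(params)
    if err != nil {
        fmt.Println("driver construction failed:", err)
        return
    }
    _ = d
}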


@@ -70,6 +70,9 @@ const (
 // listMax is the largest amount of objects you can request from S3 in a list call
 const listMax = 1000

+// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint
+const noStorageClass = "NONE"
+
 // validRegions maps known s3 region identifiers to region descriptors
 var validRegions = map[string]struct{}{}
@@ -86,6 +89,7 @@ type DriverParameters struct {
     Encrypt                     bool
     KeyID                       string
     Secure                      bool
+    V4Auth                      bool
     ChunkSize                   int64
     MultipartCopyChunkSize      int64
     MultipartCopyMaxConcurrency int64
@@ -238,6 +242,23 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
         return nil, fmt.Errorf("The secure parameter should be a boolean")
     }

+    v4Bool := true
+    v4auth := parameters["v4auth"]
+    switch v4auth := v4auth.(type) {
+    case string:
+        b, err := strconv.ParseBool(v4auth)
+        if err != nil {
+            return nil, fmt.Errorf("The v4auth parameter should be a boolean")
+        }
+        v4Bool = b
+    case bool:
+        v4Bool = v4auth
+    case nil:
+        // do nothing
+    default:
+        return nil, fmt.Errorf("The v4auth parameter should be a boolean")
+    }
+
     keyID := parameters["keyid"]
     if keyID == nil {
         keyID = ""
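
The type switch above accepts the setting as a native boolean, as a string, or as an unset value. A standalone, hypothetical helper mirroring that coercion (not part of the driver) shows the accepted forms:

package main

import (
    "fmt"
    "strconv"
)

// parseV4Auth mirrors the v4auth handling in FromParameters: bool and string
// values are accepted, an absent value defaults to true, anything else errors.
func parseV4Auth(v interface{}) (bool, error) {
    switch v := v.(type) {
    case nil:
        return true, nil // unset: keep v4 signing
    case bool:
        return v, nil
    case string:
        b, err := strconv.ParseBool(v)
        if err != nil {
            return false, fmt.Errorf("the v4auth parameter should be a boolean")
        }
        return b, nil
    default:
        return false, fmt.Errorf("the v4auth parameter should be a boolean")
    }
}

func main() {
    for _, v := range []interface{}{nil, true, "false", 42} {
        b, err := parseV4Auth(v)
        fmt.Printf("%#v -> %v (err: %v)\n", v, b, err)
    }
}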
@@ -273,12 +294,16 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
     if storageClassParam != nil {
         storageClassString, ok := storageClassParam.(string)
         if !ok {
-            return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+            return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid",
+                []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
         }
         // All valid storage class parameters are UPPERCASE, so be a bit more flexible here
         storageClassString = strings.ToUpper(storageClassString)
-        if storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy {
-            return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+        if storageClassString != noStorageClass &&
+            storageClassString != s3.StorageClassStandard &&
+            storageClassString != s3.StorageClassReducedRedundancy {
+            return nil, fmt.Errorf("The storageclass parameter must be one of %v, %v invalid",
+                []string{noStorageClass, s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
         }
         storageClass = storageClassString
     }
@@ -311,6 +336,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
         encryptBool,
         fmt.Sprint(keyID),
         secureBool,
+        v4Bool,
         chunkSize,
         multipartCopyChunkSize,
         multipartCopyMaxConcurrency,
@@ -356,22 +382,39 @@ func getParameterAsInt64(parameters map[string]interface{}, name string, default
 // New constructs a new Driver with the given AWS credentials, region, encryption flag, and
 // bucketName
 func New(params DriverParameters) (*Driver, error) {
+    if !params.V4Auth &&
+        (params.RegionEndpoint == "" ||
+            strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
+        return nil, fmt.Errorf("On Amazon S3 this storage driver can only be used with v4 authentication")
+    }
+
     awsConfig := aws.NewConfig()
-    if params.RegionEndpoint != "" {
+    var creds *credentials.Credentials
+    if params.RegionEndpoint == "" {
+        creds = credentials.NewChainCredentials([]credentials.Provider{
+            &credentials.StaticProvider{
+                Value: credentials.Value{
+                    AccessKeyID:     params.AccessKey,
+                    SecretAccessKey: params.SecretKey,
+                },
+            },
+            &credentials.EnvProvider{},
+            &credentials.SharedCredentialsProvider{},
+            &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
+        })
+    } else {
+        creds = credentials.NewChainCredentials([]credentials.Provider{
+            &credentials.StaticProvider{
+                Value: credentials.Value{
+                    AccessKeyID:     params.AccessKey,
+                    SecretAccessKey: params.SecretKey,
+                },
+            },
+            &credentials.EnvProvider{},
+        })
         awsConfig.WithS3ForcePathStyle(true)
         awsConfig.WithEndpoint(params.RegionEndpoint)
     }

-    creds := credentials.NewChainCredentials([]credentials.Provider{
-        &credentials.StaticProvider{
-            Value: credentials.Value{
-                AccessKeyID:     params.AccessKey,
-                SecretAccessKey: params.SecretKey,
-            },
-        },
-        &credentials.EnvProvider{},
-        &credentials.SharedCredentialsProvider{},
-        &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())},
-    })
-
     awsConfig.WithCredentials(creds)
     awsConfig.WithRegion(params.Region)
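
The guard at the top of `New` refuses v2 signing whenever the endpoint is AWS S3 itself (an empty `RegionEndpoint` or one containing s3.amazonaws.com). A hypothetical test-style sketch, assumed to live inside the s3-aws driver package, of the behaviour it enforces; the `Bucket` field name and the credentials are assumptions made for illustration:

func TestV2SigningRefusedOnAWS(t *testing.T) {
    params := DriverParameters{
        AccessKey: "AKIAEXAMPLE", // placeholder credentials
        SecretKey: "secret",
        Bucket:    "registry", // field name assumed; not shown in this diff
        Region:    "us-east-1",
        V4Auth:    false, // RegionEndpoint left empty, i.e. AWS S3 proper
    }
    if _, err := New(params); err == nil {
        t.Fatal("expected New to reject v2 signature signing against AWS S3")
    }
}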
@@ -385,6 +428,11 @@ func New(params DriverParameters) (*Driver, error) {
     s3obj := s3.New(session.New(awsConfig))

+    // enable S3 compatible signature v2 signing instead
+    if !params.V4Auth {
+        setv2Handlers(s3obj)
+    }
+
     // TODO Currently multipart uploads have no timestamps, so this would be unwise
     // if you initiated a new s3driver while another one is running on the same bucket.
     // multis, _, err := bucket.ListMulti("", "")
@@ -868,6 +916,9 @@ func (d *driver) getACL() *string {
 }

 func (d *driver) getStorageClass() *string {
+    if d.StorageClass == noStorageClass {
+        return nil
+    }
     return aws.String(d.StorageClass)
 }
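
Returning nil here matters because the aws-sdk-go serializer omits optional headers whose pointer fields are nil, so a `NONE` storage class keeps `x-amz-storage-class` out of the request entirely. A hedged standalone sketch of the same idea, assuming the usual aws-sdk-go imports already present in the driver:

// storageClassFor is a hypothetical helper mirroring getStorageClass: "NONE"
// maps to a nil pointer, which means no x-amz-storage-class header is sent.
func storageClassFor(class string) *string {
    if class == "NONE" { // same sentinel as the noStorageClass constant
        return nil
    }
    return aws.String(class)
}

// Used when building an upload request (bucket and key are placeholders):
//   input := &s3.PutObjectInput{
//       Bucket:       aws.String("registry"),
//       Key:          aws.String("path/to/blob"),
//       StorageClass: storageClassFor("NONE"), // header omitted entirely
//   }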


@@ -31,6 +31,7 @@ func init() {
     encrypt := os.Getenv("S3_ENCRYPT")
     keyID := os.Getenv("S3_KEY_ID")
     secure := os.Getenv("S3_SECURE")
+    v4Auth := os.Getenv("S3_V4_AUTH")
     region := os.Getenv("AWS_REGION")
     objectAcl := os.Getenv("S3_OBJECT_ACL")
     root, err := ioutil.TempDir("", "driver-")
@@ -57,6 +58,14 @@
             }
         }

+        v4Bool := true
+        if v4Auth != "" {
+            v4Bool, err = strconv.ParseBool(v4Auth)
+            if err != nil {
+                return nil, err
+            }
+        }
+
         parameters := DriverParameters{
             accessKey,
             secretKey,
@@ -66,6 +75,7 @@
             encryptBool,
             keyID,
             secureBool,
+            v4Bool,
             minChunkSize,
             defaultMultipartCopyChunkSize,
             defaultMultipartCopyMaxConcurrency,
@@ -163,6 +173,10 @@ func TestStorageClass(t *testing.T) {
         t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err)
     }

+    if _, err = s3DriverConstructor(rootDir, noStorageClass); err != nil {
+        t.Fatalf("unexpected error creating driver without storage class: %v", err)
+    }
+
     standardFilename := "/test-standard"
     rrFilename := "/test-rr"
     contents := []byte("contents")


@@ -0,0 +1,212 @@
package s3
// Source: https://github.com/pivotal-golang/s3cli
// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import (
    "crypto/hmac"
    "crypto/sha1"
    "encoding/base64"
    "net/http"
    "net/url"
    "sort"
    "strings"
    "time"

    log "github.com/Sirupsen/logrus"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/service/s3"
)
const (
    signatureVersion = "2"
    signatureMethod  = "HmacSHA1"
    timeFormat       = "2006-01-02T15:04:05Z"
)
type signer struct {
    // Values that must be populated from the request
    Request     *http.Request
    Time        time.Time
    Credentials *credentials.Credentials
    Query       url.Values

    stringToSign string
    signature    string
}
var s3ParamsToSign = map[string]bool{
    "acl":                          true,
    "location":                     true,
    "logging":                      true,
    "notification":                 true,
    "partNumber":                   true,
    "policy":                       true,
    "requestPayment":               true,
    "torrent":                      true,
    "uploadId":                     true,
    "uploads":                      true,
    "versionId":                    true,
    "versioning":                   true,
    "versions":                     true,
    "response-content-type":        true,
    "response-content-language":    true,
    "response-expires":             true,
    "response-cache-control":       true,
    "response-content-disposition": true,
    "response-content-encoding":    true,
    "website":                      true,
    "delete":                       true,
}
// setv2Handlers will setup v2 signature signing on the S3 driver
func setv2Handlers(svc *s3.S3) {
    svc.Handlers.Build.PushBack(func(r *request.Request) {
        parsedURL, err := url.Parse(r.HTTPRequest.URL.String())
        if err != nil {
            log.Fatalf("Failed to parse URL: %v", err)
        }
        r.HTTPRequest.URL.Opaque = parsedURL.Path
    })

    svc.Handlers.Sign.Clear()
    svc.Handlers.Sign.PushBack(Sign)
    svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
}
// Sign requests with signature version 2.
//
// Will sign the requests with the service config's Credentials object
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
// object.
func Sign(req *request.Request) {
    // If the request does not need to be signed ignore the signing of the
    // request if the AnonymousCredentials object is used.
    if req.Config.Credentials == credentials.AnonymousCredentials {
        return
    }

    v2 := signer{
        Request:     req.HTTPRequest,
        Time:        req.Time,
        Credentials: req.Config.Credentials,
    }
    v2.Sign()
}
func (v2 *signer) Sign() error {
    credValue, err := v2.Credentials.Get()
    if err != nil {
        return err
    }
    accessKey := credValue.AccessKeyID
    var (
        md5, ctype, date, xamz string
        xamzDate               bool
        sarray                 []string
    )

    headers := v2.Request.Header
    params := v2.Request.URL.Query()
    parsedURL, err := url.Parse(v2.Request.URL.String())
    if err != nil {
        return err
    }
    host, canonicalPath := parsedURL.Host, parsedURL.Path
    v2.Request.Header["Host"] = []string{host}
    v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)}

    for k, v := range headers {
        k = strings.ToLower(k)
        switch k {
        case "content-md5":
            md5 = v[0]
        case "content-type":
            ctype = v[0]
        case "date":
            if !xamzDate {
                date = v[0]
            }
        default:
            if strings.HasPrefix(k, "x-amz-") {
                vall := strings.Join(v, ",")
                sarray = append(sarray, k+":"+vall)
                if k == "x-amz-date" {
                    xamzDate = true
                    date = ""
                }
            }
        }
    }
    if len(sarray) > 0 {
        sort.StringSlice(sarray).Sort()
        xamz = strings.Join(sarray, "\n") + "\n"
    }

    expires := false
    if v, ok := params["Expires"]; ok {
        expires = true
        date = v[0]
        params["AWSAccessKeyId"] = []string{accessKey}
    }

    sarray = sarray[0:0]
    for k, v := range params {
        if s3ParamsToSign[k] {
            for _, vi := range v {
                if vi == "" {
                    sarray = append(sarray, k)
                } else {
                    sarray = append(sarray, k+"="+vi)
                }
            }
        }
    }
    if len(sarray) > 0 {
        sort.StringSlice(sarray).Sort()
        canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
    }

    v2.stringToSign = strings.Join([]string{
        v2.Request.Method,
        md5,
        ctype,
        date,
        xamz + canonicalPath,
    }, "\n")

    hash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey))
    hash.Write([]byte(v2.stringToSign))
    v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))

    if expires {
        params["Signature"] = []string{string(v2.signature)}
    } else {
        headers["Authorization"] = []string{"AWS " + accessKey + ":" + string(v2.signature)}
    }

    log.WithFields(log.Fields{
        "string-to-sign": v2.stringToSign,
        "signature":      v2.signature,
    }).Debugln("request signature")

    return nil
}
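
For reference, a small self-contained sketch of the string-to-sign and Authorization header this signer produces for a simple GET with no x-amz-* headers and no signed subresources; the access key, secret, bucket path, and date below are placeholders:

package main

import (
    "crypto/hmac"
    "crypto/sha1"
    "encoding/base64"
    "fmt"
)

func main() {
    secret := "secret"                               // placeholder secret access key
    date := "Thu, 01 Sep 2016 20:52:40 UTC"          // RFC1123, as set by (*signer).Sign
    canonicalPath := "/registry/docker/registry/v2/" // path-style request path (placeholder)

    // Method, Content-MD5, Content-Type, Date, then canonical x-amz headers + path.
    stringToSign := "GET" + "\n" + "" + "\n" + "" + "\n" + date + "\n" + canonicalPath

    mac := hmac.New(sha1.New, []byte(secret))
    mac.Write([]byte(stringToSign))
    signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))

    // The signer then sets: Authorization: AWS <AccessKeyID>:<signature>
    fmt.Printf("Authorization: AWS AKIAEXAMPLE:%s\n", signature)
}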