Merge pull request #285 from howeyc/fix-aws-v4

Use new version of s3 library, Fixes #276
This commit is contained in:
Alexander Neumann 2015-08-26 20:20:32 +02:00
commit cb460b7dec
36 changed files with 2326 additions and 3014 deletions

24
Godeps/Godeps.json generated
View file

@ -22,14 +22,6 @@
"ImportPath": "github.com/kr/fs",
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
},
{
"ImportPath": "github.com/mitchellh/goamz/aws",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/mitchellh/goamz/s3",
"Rev": "caaaea8b30ee15616494ee68abd5d8ebbbef05cf"
},
{
"ImportPath": "github.com/pkg/sftp",
"Rev": "518aed2757a65cfa64d4b1b2baf08410f8b7a6bc"
@ -38,10 +30,6 @@
"ImportPath": "github.com/restic/chunker",
"Rev": "e795b80f4c927ebcf2687ce18bcf1a39fee740b1"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
@ -57,6 +45,18 @@
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "cc04154d65fb9296747569b107cfd05380b1ea3e"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "7654728e381988afd88e58cabfd6363a5ea91810"
},
{
"ImportPath": "gopkg.in/amz.v3/aws",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
},
{
"ImportPath": "gopkg.in/amz.v3/s3",
"Rev": "bff3a097c4108da57bb8cbe3aad2990d74d23676"
}
]
}

View file

@ -1,445 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package aws
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/vaughan0/go-ini"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
	Name                 string // the canonical name of this region.
	EC2Endpoint          string
	S3Endpoint           string
	S3BucketEndpoint     string // Not needed by AWS S3. Use ${bucket} for bucket name.
	S3LocationConstraint bool   // true if this region requires a LocationConstraint declaration.
	S3LowercaseBucket    bool   // true if the region requires bucket names to be lower case.
	SDBEndpoint          string // left empty in regions where SimpleDB is not offered.
	SNSEndpoint          string
	SQSEndpoint          string
	IAMEndpoint          string
	ELBEndpoint          string
	AutoScalingEndpoint  string
	RdsEndpoint          string
	Route53Endpoint      string
}
// Predefined AWS regions, collected in the Regions map below.
// Keyed literals are used so a future field added to Region cannot
// silently shift every value; endpoints a region does not offer are
// simply omitted and stay at their zero value.

var USGovWest = Region{
	Name:                 "us-gov-west-1",
	EC2Endpoint:          "https://ec2.us-gov-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-fips-us-gov-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SNSEndpoint:          "https://sns.us-gov-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-gov-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.us-gov.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.us-gov-west-1.amazonaws.com",
	RdsEndpoint:          "https://rds.us-gov-west-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var USEast = Region{
	Name:                "us-east-1",
	EC2Endpoint:         "https://ec2.us-east-1.amazonaws.com",
	S3Endpoint:          "https://s3.amazonaws.com",
	SDBEndpoint:         "https://sdb.amazonaws.com",
	SNSEndpoint:         "https://sns.us-east-1.amazonaws.com",
	SQSEndpoint:         "https://sqs.us-east-1.amazonaws.com",
	IAMEndpoint:         "https://iam.amazonaws.com",
	ELBEndpoint:         "https://elasticloadbalancing.us-east-1.amazonaws.com",
	AutoScalingEndpoint: "https://autoscaling.us-east-1.amazonaws.com",
	RdsEndpoint:         "https://rds.us-east-1.amazonaws.com",
	Route53Endpoint:     "https://route53.amazonaws.com",
}

var USWest = Region{
	Name:                 "us-west-1",
	EC2Endpoint:          "https://ec2.us-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-us-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.us-west-1.amazonaws.com",
	SNSEndpoint:          "https://sns.us-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.us-west-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.us-west-1.amazonaws.com",
	RdsEndpoint:          "https://rds.us-west-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var USWest2 = Region{
	Name:                 "us-west-2",
	EC2Endpoint:          "https://ec2.us-west-2.amazonaws.com",
	S3Endpoint:           "https://s3-us-west-2.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.us-west-2.amazonaws.com",
	SNSEndpoint:          "https://sns.us-west-2.amazonaws.com",
	SQSEndpoint:          "https://sqs.us-west-2.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.us-west-2.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.us-west-2.amazonaws.com",
	RdsEndpoint:          "https://rds.us-west-2.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var EUWest = Region{
	Name:                 "eu-west-1",
	EC2Endpoint:          "https://ec2.eu-west-1.amazonaws.com",
	S3Endpoint:           "https://s3-eu-west-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.eu-west-1.amazonaws.com",
	SNSEndpoint:          "https://sns.eu-west-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.eu-west-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.eu-west-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.eu-west-1.amazonaws.com",
	RdsEndpoint:          "https://rds.eu-west-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var EUCentral = Region{
	Name:                 "eu-central-1",
	EC2Endpoint:          "https://ec2.eu-central-1.amazonaws.com",
	S3Endpoint:           "https://s3-eu-central-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SNSEndpoint:          "https://sns.eu-central-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.eu-central-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.eu-central-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.eu-central-1.amazonaws.com",
	RdsEndpoint:          "https://rds.eu-central-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var APSoutheast = Region{
	Name:                 "ap-southeast-1",
	EC2Endpoint:          "https://ec2.ap-southeast-1.amazonaws.com",
	S3Endpoint:           "https://s3-ap-southeast-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-southeast-1.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-southeast-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-southeast-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.ap-southeast-1.amazonaws.com",
	RdsEndpoint:          "https://rds.ap-southeast-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var APSoutheast2 = Region{
	Name:                 "ap-southeast-2",
	EC2Endpoint:          "https://ec2.ap-southeast-2.amazonaws.com",
	S3Endpoint:           "https://s3-ap-southeast-2.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-southeast-2.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-southeast-2.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-southeast-2.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.ap-southeast-2.amazonaws.com",
	RdsEndpoint:          "https://rds.ap-southeast-2.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var APNortheast = Region{
	Name:                 "ap-northeast-1",
	EC2Endpoint:          "https://ec2.ap-northeast-1.amazonaws.com",
	S3Endpoint:           "https://s3-ap-northeast-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.ap-northeast-1.amazonaws.com",
	SNSEndpoint:          "https://sns.ap-northeast-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.ap-northeast-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.ap-northeast-1.amazonaws.com",
	RdsEndpoint:          "https://rds.ap-northeast-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var SAEast = Region{
	Name:                 "sa-east-1",
	EC2Endpoint:          "https://ec2.sa-east-1.amazonaws.com",
	S3Endpoint:           "https://s3-sa-east-1.amazonaws.com",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SDBEndpoint:          "https://sdb.sa-east-1.amazonaws.com",
	SNSEndpoint:          "https://sns.sa-east-1.amazonaws.com",
	SQSEndpoint:          "https://sqs.sa-east-1.amazonaws.com",
	IAMEndpoint:          "https://iam.amazonaws.com",
	ELBEndpoint:          "https://elasticloadbalancing.sa-east-1.amazonaws.com",
	AutoScalingEndpoint:  "https://autoscaling.sa-east-1.amazonaws.com",
	RdsEndpoint:          "https://rds.sa-east-1.amazonaws.com",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

var CNNorth = Region{
	Name:                 "cn-north-1",
	EC2Endpoint:          "https://ec2.cn-north-1.amazonaws.com.cn",
	S3Endpoint:           "https://s3.cn-north-1.amazonaws.com.cn",
	S3LocationConstraint: true,
	S3LowercaseBucket:    true,
	SNSEndpoint:          "https://sns.cn-north-1.amazonaws.com.cn",
	SQSEndpoint:          "https://sqs.cn-north-1.amazonaws.com.cn",
	IAMEndpoint:          "https://iam.cn-north-1.amazonaws.com.cn",
	ELBEndpoint:          "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
	AutoScalingEndpoint:  "https://autoscaling.cn-north-1.amazonaws.com.cn",
	RdsEndpoint:          "https://rds.cn-north-1.amazonaws.com.cn",
	Route53Endpoint:      "https://route53.amazonaws.com",
}

// Regions maps every known region's canonical name to its definition.
var Regions = map[string]Region{
	APNortheast.Name:  APNortheast,
	APSoutheast.Name:  APSoutheast,
	APSoutheast2.Name: APSoutheast2,
	EUWest.Name:       EUWest,
	EUCentral.Name:    EUCentral,
	USEast.Name:       USEast,
	USWest.Name:       USWest,
	USWest2.Name:      USWest2,
	SAEast.Name:       SAEast,
	USGovWest.Name:    USGovWest,
	CNNorth.Name:      CNNorth,
}
// Auth holds a set of AWS credentials. Token is only populated for
// temporary credentials (instance-role credentials or the
// AWS_SECURITY_TOKEN environment variable).
type Auth struct {
	AccessKey, SecretKey, Token string
}
// unreserved marks the ASCII bytes that may appear unescaped in a URI
// ("unreserved" characters, RFC 3986 section 2.3).
var unreserved = make([]bool, 128)

// hex holds the digits used when percent-encoding a byte.
var hex = "0123456789ABCDEF"

func init() {
	// RFC 3986: unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
	for c := byte('A'); c <= 'Z'; c++ {
		unreserved[c] = true
		unreserved[c+('a'-'A')] = true
	}
	for c := byte('0'); c <= '9'; c++ {
		unreserved[c] = true
	}
	for _, c := range []byte("-_.~") {
		unreserved[c] = true
	}
}
// credentials mirrors the JSON document served by the EC2
// instance-metadata service under iam/security-credentials/<role>.
type credentials struct {
	Code            string
	LastUpdated     string
	Type            string
	AccessKeyId     string
	SecretAccessKey string
	Token           string
	Expiration      string
}
// GetMetaData retrieves instance metadata about the current machine
// from the EC2 instance-metadata service, returning the raw response
// body for the given sub-path.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
	url := "http://169.254.169.254/latest/meta-data/" + path

	resp, err := RetryingClient.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
		return
	}

	// ioutil.ReadAll already yields a []byte; the old code copied it
	// through a redundant []byte(...) conversion.
	return ioutil.ReadAll(resp.Body)
}
// getInstanceCredentials queries the EC2 instance-metadata service for
// the temporary credentials of the IAM role attached to this instance.
func getInstanceCredentials() (cred credentials, err error) {
	credentialPath := "iam/security-credentials/"

	// The metadata service lists the instance's role name at this path.
	role, err := GetMetaData(credentialPath)
	if err != nil {
		return
	}

	// Fetch the JSON credential document for that role.
	credentialJSON, err := GetMetaData(credentialPath + string(role))
	if err != nil {
		return
	}

	// credentialJSON is already a []byte; no conversion is needed
	// before unmarshalling (the old code copied it redundantly).
	err = json.Unmarshal(credentialJSON, &cred)
	return
}
// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
// The sources are tried in that order; the first one that yields
// credentials wins.
func GetAuth(accessKey string, secretKey string) (auth Auth, err error) {
	// First try passed in credentials
	if accessKey != "" && secretKey != "" {
		return Auth{accessKey, secretKey, ""}, nil
	}

	// Next try the shared credentials file ($HOME/.aws/credentials).
	// (The original comment wrongly said "environment" here.)
	auth, err = SharedAuth()
	if err == nil {
		// Found auth, return
		return
	}

	// Next try to get auth from the environment
	auth, err = EnvAuth()
	if err == nil {
		// Found auth, return
		return
	}

	// Next try getting auth from the instance role
	cred, err := getInstanceCredentials()
	if err == nil {
		// Found auth, return
		auth.AccessKey = cred.AccessKeyId
		auth.SecretKey = cred.SecretAccessKey
		auth.Token = cred.Token
		return
	}
	err = errors.New("No valid AWS authentication found")
	return
}
// SharedAuth creates an Auth based on shared credentials stored in
// $HOME/.aws/credentials. The AWS_PROFILE environment variable is used
// to select the profile (defaulting to "default"), and
// AWS_CREDENTIAL_FILE may override the credentials file location.
func SharedAuth() (auth Auth, err error) {
	var profileName = os.Getenv("AWS_PROFILE")
	if profileName == "" {
		profileName = "default"
	}

	var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
	if credentialsFile == "" {
		var homeDir = os.Getenv("HOME")
		if homeDir == "" {
			err = errors.New("Could not get HOME")
			return
		}
		credentialsFile = homeDir + "/.aws/credentials"
	}

	file, err := ini.LoadFile(credentialsFile)
	if err != nil {
		err = errors.New("Couldn't parse AWS credentials file")
		return
	}

	var profile = file[profileName]
	if profile == nil {
		err = errors.New("Couldn't find profile in AWS credentials file")
		return
	}

	auth.AccessKey = profile["aws_access_key_id"]
	auth.SecretKey = profile["aws_secret_access_key"]

	if auth.AccessKey == "" {
		// Bug fix: the message previously read "not found in environment
		// in credentials file", conflating the two credential sources.
		err = errors.New("AWS_ACCESS_KEY_ID not found in credentials file")
	}
	// Note: if both keys are missing, this error replaces the one above.
	if auth.SecretKey == "" {
		err = errors.New("AWS_SECRET_ACCESS_KEY not found in credentials file")
	}
	return
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used, with AWS_ACCESS_KEY and AWS_SECRET_KEY as
// fallbacks. For accounts that require a security token, it is read
// from AWS_SECURITY_TOKEN.
func EnvAuth() (auth Auth, err error) {
	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	if auth.AccessKey == "" {
		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
	}

	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
	if auth.SecretKey == "" {
		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
	}

	auth.Token = os.Getenv("AWS_SECURITY_TOKEN")

	if auth.AccessKey == "" {
		err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
	}
	// Note: when both keys are missing, the secret-key error below
	// overwrites the access-key error above.
	if auth.SecretKey == "" {
		err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
	}
	return
}
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures: every byte outside the RFC 3986
// unreserved set is replaced by its %XX form.
func Encode(s string) string {
	// Fast path: most strings need no escaping at all.
	needsEscape := false
	for i := 0; i < len(s); i++ {
		if b := s[i]; b > 127 || !unreserved[b] {
			needsEscape = true
			break
		}
	}
	if !needsEscape {
		return s
	}

	// Worst case, every byte expands to three ("%XX").
	out := make([]byte, 0, len(s)*3)
	for i := 0; i < len(s); i++ {
		b := s[i]
		if b > 127 || !unreserved[b] {
			out = append(out, '%', hex[b>>4], hex[b&0xF])
		} else {
			out = append(out, b)
		}
	}
	return string(out)
}

View file

@ -1,203 +0,0 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"io/ioutil"
"os"
"strings"
"testing"
)
func Test(t *testing.T) {
TestingT(t)
}
var _ = Suite(&S{})
type S struct {
environ []string
}
func (s *S) SetUpSuite(c *C) {
s.environ = os.Environ()
}
func (s *S) TearDownTest(c *C) {
os.Clearenv()
for _, kv := range s.environ {
l := strings.SplitN(kv, "=", 2)
os.Setenv(l[0], l[1])
}
}
func (s *S) TestSharedAuthNoHome(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Could not get HOME")
}
func (s *S) TestSharedAuthNoCredentialsFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
os.Setenv("HOME", "/tmp")
_, err := aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't parse AWS credentials file")
}
func (s *S) TestSharedAuthNoProfileInFile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "foo")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\n"), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "Couldn't find profile in AWS credentials file")
}
func (s *S) TestSharedAuthNoKeysInProfile(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\nawsaccesskeyid = AK.."), 0644)
os.Setenv("HOME", d)
_, err = aws.SharedAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in credentials file")
}
func (s *S) TestSharedAuthDefaultCredentials(c *C) {
os.Clearenv()
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[default]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestSharedAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "bar")
d, err := ioutil.TempDir("", "")
if err != nil {
panic(err)
}
defer os.RemoveAll(d)
err = os.Mkdir(d+"/.aws", 0755)
if err != nil {
panic(err)
}
ioutil.WriteFile(d+"/.aws/credentials", []byte("[bar]\naws_access_key_id = access\naws_secret_access_key = secret\n"), 0644)
os.Setenv("HOME", d)
auth, err := aws.SharedAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthNoSecret(c *C) {
os.Clearenv()
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
}
func (s *S) TestEnvAuthNoAccess(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
}
func (s *S) TestEnvAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthWithToken(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
os.Setenv("AWS_SECURITY_TOKEN", "token")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access", Token: "token"})
}
func (s *S) TestEnvAuthAlt(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthStatic(c *C) {
auth, err := aws.GetAuth("access", "secret")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestGetAuthEnv(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.GetAuth("", "")
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEncode(c *C) {
c.Assert(aws.Encode("foo"), Equals, "foo")
c.Assert(aws.Encode("/"), Equals, "%2F")
}
func (s *S) TestRegionsAreNamed(c *C) {
for n, r := range aws.Regions {
c.Assert(n, Equals, r.Name)
}
}

View file

@ -1,125 +0,0 @@
package aws
import (
"math"
"net"
"net/http"
"time"
)
// RetryableFunc reports whether a request that produced the given
// response/error may safely be retried.
type RetryableFunc func(*http.Request, *http.Response, error) bool

// WaitFunc delays between retry attempts; try is the zero-based
// number of the attempt that just failed.
type WaitFunc func(try int)

// DeadlineFunc returns the absolute deadline to stamp on a newly
// dialed connection.
type DeadlineFunc func() time.Time

// ResilientTransport is an http.RoundTripper (see RoundTrip below)
// that retries failed requests according to a configurable policy.
type ResilientTransport struct {
	// Timeout is the maximum amount of time a dial will wait for
	// a connect to complete.
	//
	// The default is no timeout.
	//
	// With or without a timeout, the operating system may impose
	// its own earlier timeout. For instance, TCP timeouts are
	// often around 3 minutes.
	DialTimeout time.Duration

	// MaxTries, if non-zero, specifies the number of times we will retry on
	// failure. Retries are only attempted for temporary network errors or known
	// safe failures.
	MaxTries    int
	Deadline    DeadlineFunc
	ShouldRetry RetryableFunc
	Wait        WaitFunc
	transport   *http.Transport // underlying transport that actually sends requests
}
// Convenience method for creating an http client whose transport is rt.
// The dialer enforces rt.DialTimeout and stamps each new connection
// with the deadline returned by rt.Deadline.
func NewClient(rt *ResilientTransport) *http.Client {
	rt.transport = &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
			if err != nil {
				return nil, err
			}
			// NOTE(review): the deadline is set once at dial time and never
			// refreshed, so it bounds the connection's whole lifetime.
			c.SetDeadline(rt.Deadline())
			return c, nil
		},
		DisableKeepAlives: true,
		Proxy:             http.ProxyFromEnvironment,
	}
	// TODO: Would be nice if ResilientTransport allowed clients to initialize
	// with http.Transport attributes.
	return &http.Client{
		Transport: rt,
	}
}
// retryingTransport is the default retry policy: up to 3 attempts,
// a 10s dial timeout, a 5s per-connection deadline, retrying per
// awsRetry with exponential backoff between attempts.
var retryingTransport = &ResilientTransport{
	Deadline: func() time.Time {
		return time.Now().Add(5 * time.Second)
	},
	DialTimeout: 10 * time.Second,
	MaxTries:    3,
	ShouldRetry: awsRetry,
	Wait:        ExpBackoff,
}

// Exported default client
var RetryingClient = NewClient(retryingTransport)
// RoundTrip implements http.RoundTripper, sending req with the
// transport's retry policy.
func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	return t.tries(req)
}

// Retry a request a maximum of t.MaxTries times.
// We'll only retry if the proper criteria are met.
// If a wait function is specified, wait that amount of time
// in between requests.
func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
	for try := 0; try < t.MaxTries; try++ {
		res, err = t.transport.RoundTrip(req)
		if !t.ShouldRetry(req, res, err) {
			break
		}
		// Bug fix: on the final attempt the old code closed res.Body and
		// then returned the closed response to the caller (and pointlessly
		// slept). Stop before closing when no retry will follow.
		if try == t.MaxTries-1 {
			break
		}
		if res != nil {
			// Close the failed response so its connection isn't leaked.
			res.Body.Close()
		}
		if t.Wait != nil {
			t.Wait(try)
		}
	}
	return
}
func ExpBackoff(try int) {
time.Sleep(100 * time.Millisecond *
time.Duration(math.Exp2(float64(try))))
}
func LinearBackoff(try int) {
time.Sleep(time.Duration(try*100) * time.Millisecond)
}
// Decide if we should retry a request.
// In general, the criteria for retrying a request is described here
// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
func awsRetry(req *http.Request, res *http.Response, err error) bool {
retry := false
// Retry if there's a temporary network error.
if neterr, ok := err.(net.Error); ok {
if neterr.Temporary() {
retry = true
}
}
// Retry if we get a 5xx series error.
if res != nil {
if res.StatusCode >= 500 && res.StatusCode < 600 {
retry = true
}
}
return retry
}

View file

@ -1,121 +0,0 @@
package aws_test
import (
"fmt"
"github.com/mitchellh/goamz/aws"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
// Retrieve the response from handler using aws.RetryingClient
func serveAndGet(handler http.HandlerFunc) (body string, err error) {
ts := httptest.NewServer(handler)
defer ts.Close()
resp, err := aws.RetryingClient.Get(ts.URL)
if err != nil {
return
}
if resp.StatusCode != 200 {
return "", fmt.Errorf("Bad status code: %d", resp.StatusCode)
}
greeting, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return
}
return strings.TrimSpace(string(greeting)), nil
}
func TestClient_expected(t *testing.T) {
body := "foo bar"
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_delay(t *testing.T) {
body := "baz"
wait := 4
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if wait < 0 {
// If we dipped to zero delay and still failed.
t.Fatal("Never succeeded.")
}
wait -= 1
time.Sleep(time.Second * time.Duration(wait))
fmt.Fprintln(w, body)
})
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.", resp)
}
}
func TestClient_no4xxRetry(t *testing.T) {
tries := 0
// Fail once before succeeding.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 404)
})
if err == nil {
t.Fatal("should have error")
}
if tries != 1 {
t.Fatalf("should only try once: %d", tries)
}
}
func TestClient_retries(t *testing.T) {
body := "biz"
failed := false
// Fail once before succeeding.
resp, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
if !failed {
http.Error(w, "error", 500)
failed = true
} else {
fmt.Fprintln(w, body)
}
})
if failed != true {
t.Error("We didn't retry!")
}
if err != nil {
t.Fatal(err)
}
if resp != body {
t.Fatal("Body not as expected.")
}
}
func TestClient_fails(t *testing.T) {
tries := 0
// Fail 3 times and return the last error.
_, err := serveAndGet(func(w http.ResponseWriter, r *http.Request) {
tries += 1
http.Error(w, "error", 500)
})
if err == nil {
t.Fatal(err)
}
if tries != 3 {
t.Fatal("Didn't retry enough")
}
}

View file

@ -1,893 +0,0 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package s3
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/aws"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"strconv"
"strings"
"time"
)
// debug toggles extra diagnostics elsewhere in this package —
// presumably request/response dumping via httputil; verify at use site.
const debug = false

// The S3 type encapsulates operations with an S3 region.
type S3 struct {
	aws.Auth
	aws.Region
	HTTPClient func() *http.Client // supplies the client used for each request
	private    byte                // Reserve the right of using private data.
}

// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
	*S3
	Name string
}

// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
	ID          string
	DisplayName string
}

// attempts is the retry schedule applied to S3 requests
// (see aws.AttemptStrategy for the field semantics).
var attempts = aws.AttemptStrategy{
	Min:   5,
	Total: 5 * time.Second,
	Delay: 200 * time.Millisecond,
}
// New creates a new S3 client for the given credentials and region.
func New(auth aws.Auth, region aws.Region) *S3 {
	client := &S3{
		Auth:   auth,
		Region: region,
		// Use the shared default client unless the caller overrides
		// HTTPClient afterwards.
		HTTPClient: func() *http.Client {
			return http.DefaultClient
		},
		private: 0,
	}
	return client
}
// Bucket returns a Bucket with the given name.
func (s3 *S3) Bucket(name string) *Bucket {
	// Lowercase the name for regions with a custom bucket endpoint or
	// the S3LowercaseBucket flag — presumably those endpoints require
	// lowercase bucket names; verify against the region definitions.
	if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
		name = strings.ToLower(name)
	}
	return &Bucket{s3, name}
}
// createBucketConfiguration is the XML body sent with bucket creation
// for regions that require an explicit LocationConstraint.
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`

// locationConstraint returns an io.Reader specifying a LocationConstraint if
// required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() io.Reader {
	constraint := ""
	if s3.Region.S3LocationConstraint {
		constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
	}
	// An empty reader is valid: regions without the constraint send
	// an empty request body.
	return strings.NewReader(constraint)
}
type ACL string
const (
Private = ACL("private")
PublicRead = ACL("public-read")
PublicReadWrite = ACL("public-read-write")
AuthenticatedRead = ACL("authenticated-read")
BucketOwnerRead = ACL("bucket-owner-read")
BucketOwnerFull = ACL("bucket-owner-full-control")
)
// The ListBucketsResp type holds the results of a List buckets operation.
type ListBucketsResp struct {
Buckets []Bucket `xml:">Bucket"`
}
// ListBuckets lists all buckets
//
// See: http://goo.gl/NqlyMN
func (s3 *S3) ListBuckets() (result *ListBucketsResp, err error) {
req := &request{
path: "/",
}
result = &ListBucketsResp{}
for attempt := attempts.Start(); attempt.Next(); {
err = s3.query(req, result)
if !shouldRetry(err) {
break
}
}
if err != nil {
return nil, err
}
// set S3 instance on buckets
for i := range result.Buckets {
result.Buckets[i].S3 = s3
}
return result, nil
}
// PutBucket creates a new bucket.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
headers := map[string][]string{
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: "/",
headers: headers,
payload: b.locationConstraint(),
}
return b.S3.query(req, nil)
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
req := &request{
method: "DELETE",
bucket: b.Name,
path: "/",
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, nil)
if !shouldRetry(err) {
break
}
}
return err
}
// Get retrieves an object from an S3 bucket.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
	rc, err := b.GetReader(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
// GetReader retrieves an object from an S3 bucket.
// It is the caller's responsibility to call Close on rc when
// finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
resp, err := b.GetResponse(path)
if resp != nil {
return resp.Body, err
}
return nil, err
}
// GetResponse retrieves an object from an S3 bucket returning the http response
// It is the caller's responsibility to call Close on rc when
// finished reading.
func (b *Bucket) GetResponse(path string) (*http.Response, error) {
return b.getResponseParams(path, nil)
}
// GetTorrent retrieves an Torrent object from an S3 bucket an io.ReadCloser.
// It is the caller's responsibility to call Close on rc when finished reading.
func (b *Bucket) GetTorrentReader(path string) (io.ReadCloser, error) {
resp, err := b.getResponseParams(path, url.Values{"torrent": {""}})
if err != nil {
return nil, err
}
return resp.Body, nil
}
// GetTorrent retrieves an Torrent object from an S3, returning
// the torrent as a []byte.
func (b *Bucket) GetTorrent(path string) ([]byte, error) {
body, err := b.GetTorrentReader(path)
if err != nil {
return nil, err
}
defer body.Close()
return ioutil.ReadAll(body)
}
func (b *Bucket) getResponseParams(path string, params url.Values) (*http.Response, error) {
req := &request{
bucket: b.Name,
path: path,
params: params,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
func (b *Bucket) Head(path string) (*http.Response, error) {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
}
err := b.S3.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
resp, err := b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return nil, err
}
return resp, nil
}
panic("unreachable")
}
// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
	return b.PutReader(path, bytes.NewBuffer(data), int64(len(data)), contType, perm)
}

// PutHeader - like Put, inserts an object into the S3 bucket.
// Instead of Content-Type string, pass in custom headers to override defaults.
func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
	return b.PutReaderHeader(path, bytes.NewBuffer(data), int64(len(data)), customHeaders, perm)
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {contType},
"x-amz-acl": {string(perm)},
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
/*
PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
Instead of Content-Type string, pass in custom headers to override defaults.
*/
func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error {
// Default headers
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(length, 10)},
"Content-Type": {"application/text"},
"x-amz-acl": {string(perm)},
}
// Override with custom headers
for key, value := range customHeaders {
headers[key] = value
}
req := &request{
method: "PUT",
bucket: b.Name,
path: path,
headers: headers,
payload: r,
}
return b.S3.query(req, nil)
}
/*
Copy - copy objects inside bucket
*/
func (b *Bucket) Copy(oldPath, newPath string, perm ACL) error {
if !strings.HasPrefix(oldPath, "/") {
oldPath = "/" + oldPath
}
req := &request{
method: "PUT",
bucket: b.Name,
path: newPath,
headers: map[string][]string{
"x-amz-copy-source": {amazonEscape("/" + b.Name + oldPath)},
"x-amz-acl": {string(perm)},
},
}
err := b.S3.prepare(req)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
_, err = b.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return err
}
return nil
}
panic("unreachable")
}
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
req := &request{
method: "DELETE",
bucket: b.Name,
path: path,
}
return b.S3.query(req, nil)
}
// Object identifies a single key in a multi-object delete request.
type Object struct {
	Key string
}

// MultiObjectDeleteBody is the XML request body for the S3
// multi-object delete operation (POST /?delete).
type MultiObjectDeleteBody struct {
	XMLName xml.Name `xml:"Delete"`
	Quiet   bool
	Object  []Object
}
// base64md5 returns the base64-encoded MD5 digest of data, the form
// S3 expects in the Content-MD5 request header.
func base64md5(data []byte) string {
	digest := md5.Sum(data)
	return base64.StdEncoding.EncodeToString(digest[:])
}
// MultiDel removes multiple objects from the S3 bucket efficiently.
// A maximum of 1000 keys at once may be specified.
//
// See http://goo.gl/WvA5sj for details.
func (b *Bucket) MultiDel(paths []string) error {
	// Build the XML payload listing every key to delete.
	v := MultiObjectDeleteBody{}
	v.Object = make([]Object, len(paths))
	for i, path := range paths {
		v.Object[i] = Object{path}
	}
	data, err := xml.Marshal(v)
	if err != nil {
		// Bug fix: the marshal error was previously discarded with
		// `data, _ := xml.Marshal(v)`.
		return err
	}

	// Content-MD5 is required by S3 for multi-object deletes.
	md5hash := base64md5(data)
	req := &request{
		method:  "POST",
		bucket:  b.Name,
		path:    "/",
		params:  url.Values{"delete": {""}},
		headers: http.Header{"Content-MD5": {md5hash}},
		payload: bytes.NewReader(data),
	}

	return b.S3.query(req, nil)
}
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
Name string
Prefix string
Delimiter string
Marker string
NextMarker string
MaxKeys int
// IsTruncated is true if the results have been truncated because
// there are more keys and prefixes than can fit in MaxKeys.
// N.B. this is the opposite sense to that documented (incorrectly) in
// http://goo.gl/YjQTc
IsTruncated bool
Contents []Key
CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
Key string
LastModified string
Size int64
// ETag gives the hex-encoded MD5 sum of the contents,
// surrounded with double-quotes.
ETag string
StorageClass string
Owner Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
// index.html
// index2.html
// photos/2006/January/sample.jpg
// photos/2006/February/sample2.jpg
// photos/2006/February/sample3.jpg
// photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Contents: []Key{
// {Key: "index.html", "index2.html"},
// },
// CommonPrefixes: []string{
// "photos/",
// },
// }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Prefix: "photos/2006/",
// CommonPrefixes: []string{
// "photos/2006/February/",
// "photos/2006/January/",
// },
// }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
	params := url.Values{
		"prefix":    {prefix},
		"delimiter": {delim},
		"marker":    {marker},
	}
	if max != 0 {
		params.Set("max-keys", strconv.Itoa(max))
	}
	req := &request{
		bucket: b.Name,
		params: params,
	}
	result = &ListResp{}
	// Retry transient failures; query re-signs the request each attempt.
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.S3.query(req, result)
		if !shouldRetry(err) {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	return result, nil
}
// GetBucketContents returns a mapping of all key names in this bucket to
// their Key metadata, paging through List results until the listing is
// no longer truncated.
func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
	contents := map[string]Key{}
	marker := ""
	for {
		resp, err := b.List("", "", marker, 1000)
		if err != nil {
			return &contents, err
		}
		lastKey := ""
		for _, key := range resp.Contents {
			contents[key.Key] = key
			lastKey = key.Key
		}
		if !resp.IsTruncated {
			return &contents, nil
		}
		// From the s3 docs: if the response is truncated but contains no
		// NextMarker, the last key in the response serves as the marker
		// for the subsequent request.
		if marker = resp.NextMarker; marker == "" {
			marker = lastKey
		}
	}
}
// GetKey retrieves metadata for the object at path via a HEAD request,
// without downloading its content. Only Key, LastModified, ETag and
// Size are populated; the remaining Key fields keep their zero values.
func (b *Bucket) GetKey(path string) (*Key, error) {
	req := &request{
		bucket: b.Name,
		path:   path,
		method: "HEAD",
	}
	err := b.S3.prepare(req)
	if err != nil {
		return nil, err
	}
	key := &Key{}
	for attempt := attempts.Start(); attempt.Next(); {
		resp, err := b.S3.run(req, nil)
		// Retry transient failures while attempts remain.
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return nil, err
		}
		key.Key = path
		key.LastModified = resp.Header.Get("Last-Modified")
		key.ETag = resp.Header.Get("ETag")
		contentLength := resp.Header.Get("Content-Length")
		size, err := strconv.ParseInt(contentLength, 10, 64)
		if err != nil {
			// The partially filled key is returned alongside the error.
			return key, fmt.Errorf("bad s3 content-length %v: %v",
				contentLength, err)
		}
		key.Size = size
		return key, nil
	}
	// attempts.Start() always yields at least one attempt and every
	// attempt either continues or returns, so this is never reached.
	panic("unreachable")
}
// URL returns a non-signed URL that allows retrieving the object at
// path. It only works if the object is publicly readable (see SignedURL).
func (b *Bucket) URL(path string) string {
	req := &request{
		bucket: b.Name,
		path:   path,
	}
	if err := b.S3.prepare(req); err != nil {
		panic(err)
	}
	u, err := req.url(true)
	if err != nil {
		panic(err)
	}
	// Drop any query parameters added during preparation.
	u.RawQuery = ""
	return u.String()
}
// SignedURL returns a signed URL that allows anyone holding the URL
// to retrieve the object at path. The signature is valid until expires.
func (b *Bucket) SignedURL(path string, expires time.Time) string {
	expiry := strconv.FormatInt(expires.Unix(), 10)
	req := &request{
		bucket: b.Name,
		path:   path,
		params: url.Values{"Expires": {expiry}},
	}
	if err := b.S3.prepare(req); err != nil {
		panic(err)
	}
	u, err := req.url(true)
	if err != nil {
		panic(err)
	}
	return u.String()
}
// request describes one S3 HTTP request in a form that can be prepared,
// signed and retried.
type request struct {
	method   string     // HTTP verb; defaults to "GET" in prepare.
	bucket   string     // bucket name, or "" for service-level requests.
	path     string     // object path; made absolute by prepare.
	signpath string     // path used for signing; includes the bucket.
	params   url.Values // query-string parameters.
	headers  http.Header
	baseurl  string    // endpoint URL chosen by prepare.
	payload  io.Reader // request body, if any.
	prepared bool      // set once prepare has normalized the request.
}
// amazonShouldEscape reports whether byte c must be percent-escaped in
// an S3 request path. Alphanumerics and _ - ~ . / : are left untouched.
func amazonShouldEscape(c byte) bool {
	switch {
	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z', '0' <= c && c <= '9':
		return false
	case c == '_', c == '-', c == '~', c == '.', c == '/', c == ':':
		return false
	}
	return true
}
// amazonEscape percent-escapes s exactly the way Amazon does when
// signing request paths: every byte except alphanumerics and _ - ~ . / :
// becomes %XX with uppercase hex digits. The input string is returned
// unchanged when nothing needs escaping.
func amazonEscape(s string) string {
	const hexDigits = "0123456789ABCDEF"
	unsafe := func(c byte) bool {
		return !('A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' ||
			'0' <= c && c <= '9' || c == '_' || c == '-' ||
			c == '~' || c == '.' || c == '/' || c == ':')
	}
	escaped := 0
	for i := 0; i < len(s); i++ {
		if unsafe(s[i]) {
			escaped++
		}
	}
	if escaped == 0 {
		return s
	}
	out := make([]byte, 0, len(s)+2*escaped)
	for i := 0; i < len(s); i++ {
		c := s[i]
		if unsafe(c) {
			out = append(out, '%', hexDigits[c>>4], hexDigits[c&0x0F])
		} else {
			out = append(out, c)
		}
	}
	return string(out)
}
// url returns the URL for the request's resource. When full is true the
// result includes scheme and host (suitable for presenting to a caller);
// otherwise it is the partial form used when issuing the HTTP request.
func (req *request) url(full bool) (*url.URL, error) {
	u, err := url.Parse(req.baseurl)
	if err != nil {
		return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
	}
	// Opaque carries the Amazon-escaped path verbatim, bypassing
	// net/url's own escaping rules which differ from S3's.
	u.Opaque = amazonEscape(req.path)
	if full {
		u.Opaque = "//" + u.Host + u.Opaque
	}
	u.RawQuery = req.params.Encode()
	return u, nil
}
// query prepares and runs the req request. If resp is not nil, the XML
// data contained in the response body will be unmarshalled on it.
func (s3 *S3) query(req *request, resp interface{}) error {
	if err := s3.prepare(req); err != nil {
		return err
	}
	httpResponse, err := s3.run(req, resp)
	// run leaves the body open when it was not consumed for decoding, so
	// close it here to avoid leaking the connection.
	if resp == nil && httpResponse != nil {
		httpResponse.Body.Close()
	}
	return err
}
// prepare sets up req to be delivered to S3: it fills in defaults,
// resolves the endpoint for the bucket, and signs the request. The
// one-time normalization runs only on the first call, but signing is
// repeated on every call so retried requests carry a fresh Date header
// and signature.
func (s3 *S3) prepare(req *request) error {
	if !req.prepared {
		req.prepared = true
		if req.method == "" {
			req.method = "GET"
		}
		// Copy so they can be mutated without affecting on retries.
		params := make(url.Values)
		headers := make(http.Header)
		for k, v := range req.params {
			params[k] = v
		}
		for k, v := range req.headers {
			headers[k] = v
		}
		req.params = params
		req.headers = headers
		if !strings.HasPrefix(req.path, "/") {
			req.path = "/" + req.path
		}
		// The signature always covers "/<bucket><path>", even when the
		// bucket ends up in the endpoint host rather than the URL path.
		req.signpath = req.path
		if req.bucket != "" {
			req.baseurl = s3.Region.S3BucketEndpoint
			if req.baseurl == "" {
				// Use the path method to address the bucket.
				req.baseurl = s3.Region.S3Endpoint
				req.path = "/" + req.bucket + req.path
			} else {
				// Just in case, prevent injection.
				if strings.IndexAny(req.bucket, "/:@") >= 0 {
					return fmt.Errorf("bad S3 bucket: %q", req.bucket)
				}
				req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
			}
			req.signpath = "/" + req.bucket + req.signpath
		} else {
			req.baseurl = s3.Region.S3Endpoint
		}
	}
	// Always sign again as it's not clear how far the
	// server has handled a previous attempt.
	u, err := url.Parse(req.baseurl)
	if err != nil {
		return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
	}
	req.headers["Host"] = []string{u.Host}
	req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
	sign(s3.Auth, req.method, amazonEscape(req.signpath), req.params, req.headers)
	return nil
}
// run sends req and returns the http response from the server.
// If resp is not nil, the XML data contained in the response body will
// be unmarshalled on it and the body closed; otherwise the caller owns
// the still-open body.
func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
	if debug {
		log.Printf("Running S3 request: %#v", req)
	}
	u, err := req.url(false)
	if err != nil {
		return nil, err
	}
	hreq := http.Request{
		URL:        u,
		Method:     req.method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
		Header:     req.headers,
	}
	// Content-Length must travel in the dedicated request field rather
	// than as a header, so move it over.
	if v, ok := req.headers["Content-Length"]; ok {
		hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
		delete(req.headers, "Content-Length")
	}
	if req.payload != nil {
		hreq.Body = ioutil.NopCloser(req.payload)
	}
	hresp, err := s3.HTTPClient().Do(&hreq)
	if err != nil {
		return nil, err
	}
	if debug {
		dump, _ := httputil.DumpResponse(hresp, true)
		log.Printf("} -> %s\n", dump)
	}
	// Any status other than 200/204 is turned into an *Error.
	if hresp.StatusCode != 200 && hresp.StatusCode != 204 {
		defer hresp.Body.Close()
		return nil, buildError(hresp)
	}
	if resp != nil {
		err = xml.NewDecoder(hresp.Body).Decode(resp)
		hresp.Body.Close()
	}
	return hresp, err
}
// Error represents an error in an operation with S3.
type Error struct {
	StatusCode int    // HTTP status code (200, 403, ...)
	Code       string // S3 error code ("UnsupportedOperation", ...)
	Message    string // The human-oriented error message
	BucketName string
	RequestId  string
	HostId     string
}

// Error returns the human-oriented message reported by S3.
func (e *Error) Error() string {
	return e.Message
}
// buildError decodes the XML error document in r's body into an *Error
// and closes the body. The HTTP status code is always recorded; when the
// body carries no message, the HTTP status line is used instead.
func buildError(r *http.Response) error {
	if debug {
		log.Printf("got error (status code %v)", r.StatusCode)
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Printf("\tread error: %v", err)
		} else {
			log.Printf("\tdata:\n%s\n\n", data)
		}
		// Restore the body so it can still be decoded below.
		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
	}
	err := Error{}
	// TODO return error if Unmarshal fails?
	xml.NewDecoder(r.Body).Decode(&err)
	r.Body.Close()
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	if debug {
		log.Printf("err: %#v\n", err)
	}
	return &err
}
// shouldRetry reports whether err looks transient enough that the S3
// request which produced it is worth retrying.
func shouldRetry(err error) bool {
	switch err {
	case nil:
		return false
	case io.ErrUnexpectedEOF, io.EOF:
		return true
	}
	switch e := err.(type) {
	case *net.DNSError:
		return true
	case *net.OpError:
		return e.Op == "read" || e.Op == "write"
	case *Error:
		return e.Code == "InternalError" ||
			e.Code == "NoSuchUpload" ||
			e.Code == "NoSuchBucket"
	}
	return false
}
// hasCode reports whether err is an S3 *Error carrying the given code.
func hasCode(err error, code string) bool {
	if s3err, ok := err.(*Error); ok {
		return s3err.Code == code
	}
	return false
}

View file

@ -1,126 +0,0 @@
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"log"
"sort"
"strings"
"github.com/mitchellh/goamz/aws"
)
var b64 = base64.StdEncoding
// ----------------------------------------------------------------------------
// S3 signing (http://goo.gl/G1LrK)
// s3ParamsToSign lists the query-string parameters (sub-resources and
// response overrides) that must be folded into the canonical resource
// when computing an S3 request signature.
var s3ParamsToSign = map[string]bool{
	"acl":                          true,
	"delete":                       true,
	"location":                     true,
	"logging":                      true,
	"notification":                 true,
	"partNumber":                   true,
	"policy":                       true,
	"requestPayment":               true,
	"torrent":                      true,
	"uploadId":                     true,
	"uploads":                      true,
	"versionId":                    true,
	"versioning":                   true,
	"versions":                     true,
	"response-content-type":        true,
	"response-content-language":    true,
	"response-expires":             true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
}
// sign computes the AWS signature-version-2 signature for an S3 request
// and stores it either in the Authorization header or, for pre-signed
// URLs (a params["Expires"] entry), in the Signature query parameter.
// With an empty SecretKey nothing is signed. headers and params are
// mutated in place.
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
	var md5, ctype, date, xamz string
	var xamzDate bool
	var sarray []string
	// add security token
	if auth.Token != "" {
		headers["x-amz-security-token"] = []string{auth.Token}
	}
	if auth.SecretKey == "" {
		// no auth secret; skip signing, e.g. for public read-only buckets.
		return
	}
	// Collect the headers that participate in the string to sign.
	for k, v := range headers {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			ctype = v[0]
		case "date":
			if !xamzDate {
				date = v[0]
			}
		default:
			if strings.HasPrefix(k, "x-amz-") {
				vall := strings.Join(v, ",")
				sarray = append(sarray, k+":"+vall)
				// x-amz-date takes precedence over the Date header.
				if k == "x-amz-date" {
					xamzDate = true
					date = ""
				}
			}
		}
	}
	if len(sarray) > 0 {
		sort.StringSlice(sarray).Sort()
		xamz = strings.Join(sarray, "\n") + "\n"
	}
	expires := false
	if v, ok := params["Expires"]; ok {
		// Query string request authentication alternative.
		expires = true
		date = v[0]
		params["AWSAccessKeyId"] = []string{auth.AccessKey}
	}
	// Append the signed sub-resource parameters to the canonical path,
	// sorted and unencoded.
	sarray = sarray[0:0]
	for k, v := range params {
		if s3ParamsToSign[k] {
			for _, vi := range v {
				if vi == "" {
					sarray = append(sarray, k)
				} else {
					// "When signing you do not encode these values."
					sarray = append(sarray, k+"="+vi)
				}
			}
		}
	}
	if len(sarray) > 0 {
		sort.StringSlice(sarray).Sort()
		canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
	}
	payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
	// HMAC-SHA1 over the canonical string, base64-encoded.
	hash := hmac.New(sha1.New, []byte(auth.SecretKey))
	hash.Write([]byte(payload))
	signature := make([]byte, b64.EncodedLen(hash.Size()))
	b64.Encode(signature, hash.Sum(nil))
	if expires {
		params["Signature"] = []string{string(signature)}
	} else {
		headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
	}
	if debug {
		log.Printf("Signature payload: %q", payload)
		log.Printf("Signature: %q", signature)
	}
}

View file

@ -1,194 +0,0 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
)
// S3 ReST authentication docs: http://goo.gl/G1LrK
var testAuth = aws.Auth{"0PN5J17HBGZHT7JJ3X82", "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o", ""}
var emptyAuth = aws.Auth{"", "", ""}
func (s *S) TestSignExampleObjectGet(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleObjectGetNoAuth(c *C) {
method := "GET"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:36:42 +0000"},
}
s3.Sign(emptyAuth, method, path, nil, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleObjectPut(c *C) {
method := "PUT"
path := "/johnsmith/photos/puppy.jpg"
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:15:45 +0000"},
"Content-Type": {"image/jpeg"},
"Content-Length": {"94328"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleList(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"prefix": {"photos"},
"max-keys": {"50"},
"marker": {"puppy"},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:42:41 +0000"},
"User-Agent": {"Mozilla/5.0"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleFetch(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleFetchNoAuth(c *C) {
method := "GET"
path := "/johnsmith/"
params := map[string][]string{
"acl": {""},
}
headers := map[string][]string{
"Host": {"johnsmith.s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 19:44:46 +0000"},
}
s3.Sign(emptyAuth, method, path, params, headers)
c.Assert(headers["Authorization"], IsNil)
}
func (s *S) TestSignExampleDelete(c *C) {
method := "DELETE"
path := "/johnsmith/photos/puppy.jpg"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Tue, 27 Mar 2007 21:20:27 +0000"},
"User-Agent": {"dotnet"},
"x-amz-date": {"Tue, 27 Mar 2007 21:20:26 +0000"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUpload(c *C) {
method := "PUT"
path := "/static.johnsmith.net/db-backup.dat.gz"
params := map[string][]string{}
headers := map[string][]string{
"Host": {"static.johnsmith.net:8080"},
"Date": {"Tue, 27 Mar 2007 21:06:08 +0000"},
"User-Agent": {"curl/7.15.5"},
"x-amz-acl": {"public-read"},
"content-type": {"application/x-download"},
"Content-MD5": {"4gJE4saaMU4BqNR0kLY+lw=="},
"X-Amz-Meta-ReviewedBy": {"joe@johnsmith.net,jane@johnsmith.net"},
"X-Amz-Meta-FileChecksum": {"0x02661779"},
"X-Amz-Meta-ChecksumAlgorithm": {"crc32"},
"Content-Disposition": {"attachment; filename=database.dat"},
"Content-Encoding": {"gzip"},
"Content-Length": {"5913339"},
}
s3.Sign(testAuth, method, path, params, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleListAllMyBuckets(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:Db+gepJSUbZKwpx1FR0DLtEYoZA="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
func (s *S) TestSignExampleUnicodeKeys(c *C) {
method := "GET"
path := "/dictionary/fran%C3%A7ais/pr%c3%a9f%c3%a8re"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:49:49 +0000"},
}
s3.Sign(testAuth, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:dxhSBHoI6eVSPcXJqEghlUzZMnY="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
}
// Not included in AWS documentation
func (s *S) TestSignWithIAMToken(c *C) {
method := "GET"
path := "/"
headers := map[string][]string{
"Host": {"s3.amazonaws.com"},
"Date": {"Wed, 28 Mar 2007 01:29:59 +0000"},
}
authWithToken := testAuth
authWithToken.Token = "totallysecret"
s3.Sign(authWithToken, method, path, nil, headers)
expected := "AWS 0PN5J17HBGZHT7JJ3X82:SJ0yQO7NpHyXJ7zkxY+/fGQ6aUw="
c.Assert(headers["Authorization"], DeepEquals, []string{expected})
c.Assert(headers["x-amz-security-token"], DeepEquals, []string{authWithToken.Token})
}

View file

@ -1,14 +0,0 @@
Copyright (c) 2013 Vaughan Newton
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,70 +0,0 @@
go-ini
======
INI parsing library for Go (golang).
View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini).
Usage
-----
Parse an INI file:
```go
import "github.com/vaughan0/go-ini"
file, err := ini.LoadFile("myfile.ini")
```
Get data from the parsed file:
```go
name, ok := file.Get("person", "name")
if !ok {
panic("'name' variable missing from 'person' section")
}
```
Iterate through values in a section:
```go
for key, value := range file["mysection"] {
fmt.Printf("%s => %s\n", key, value)
}
```
Iterate through sections in a file:
```go
for name, section := range file {
fmt.Printf("Section name: %s\n", name)
}
```
File Format
-----------
INI files are parsed by go-ini line-by-line. Each line may be one of the following:
* A section definition: [section-name]
* A property: key = value
* A comment: #blahblah _or_ ;blahblah
* Blank. The line will be ignored.
Properties defined before any section headers are placed in the default section, which has
the empty string as its key.
Example:
```ini
# I am a comment
; So am I!
[apples]
colour = red or green
shape = applish
[oranges]
shape = square
colour = blue
```

View file

@ -1,123 +0,0 @@
// Package ini provides functions for parsing INI configuration files.
package ini
import (
"bufio"
"fmt"
"io"
"os"
"regexp"
"strings"
)
var (
sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
)
// ErrSyntax is returned when there is a syntax error in an INI file.
type ErrSyntax struct {
Line int
Source string // The contents of the erroneous line, without leading or trailing whitespace
}
func (e ErrSyntax) Error() string {
return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
}
// A File represents a parsed INI file.
type File map[string]Section
// A Section represents a single section of an INI file.
type Section map[string]string
// Section returns the named Section, creating and registering an empty
// one if it does not already exist.
func (f File) Section(name string) Section {
	if s, ok := f[name]; ok && s != nil {
		return s
	}
	s := make(Section)
	f[name] = s
	return s
}
// Get looks up a value for a key in a section and returns that value,
// along with a boolean result similar to a map lookup.
func (f File) Get(section, key string) (value string, ok bool) {
	s, found := f[section]
	if !found {
		return "", false
	}
	value, ok = s[key]
	return value, ok
}
// Loads INI data from a reader and stores the data in the File.
func (f File) Load(in io.Reader) (err error) {
bufin, ok := in.(*bufio.Reader)
if !ok {
bufin = bufio.NewReader(in)
}
return parseFile(bufin, f)
}
// Loads INI data from a named file and stores the data in the File.
func (f File) LoadFile(file string) (err error) {
in, err := os.Open(file)
if err != nil {
return
}
defer in.Close()
return f.Load(in)
}
// parseFile reads INI data line by line from in into file. Assignments
// seen before any [section] header land in the "" (default) section.
// A line that is neither blank, a comment, an assignment nor a section
// header produces an ErrSyntax carrying its line number.
func parseFile(in *bufio.Reader, file File) (err error) {
	section := ""
	lineNum := 0
	for done := false; !done; {
		var line string
		if line, err = in.ReadString('\n'); err != nil {
			if err == io.EOF {
				// Still process the final, unterminated line below.
				done = true
			} else {
				return
			}
		}
		lineNum++
		line = strings.TrimSpace(line)
		if len(line) == 0 {
			// Skip blank lines
			continue
		}
		if line[0] == ';' || line[0] == '#' {
			// Skip comments
			continue
		}
		// NOTE(review): assignments are tried before section headers, so
		// a line like "[a=b]" parses as key "[a" — confirm this is the
		// intended precedence.
		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
			key, val := groups[1], groups[2]
			key, val = strings.TrimSpace(key), strings.TrimSpace(val)
			file.Section(section)[key] = val
		} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
			name := strings.TrimSpace(groups[1])
			section = name
			// Create the section if it does not exist
			file.Section(section)
		} else {
			return ErrSyntax{lineNum, line}
		}
	}
	return nil
}
// Loads and returns a File from a reader.
func Load(in io.Reader) (File, error) {
file := make(File)
err := file.Load(in)
return file, err
}
// Loads and returns an INI File from a file on disk.
func LoadFile(filename string) (File, error) {
file := make(File)
err := file.LoadFile(filename)
return file, err
}

View file

@ -1,43 +0,0 @@
package ini
import (
"reflect"
"syscall"
"testing"
)
func TestLoadFile(t *testing.T) {
originalOpenFiles := numFilesOpen(t)
file, err := LoadFile("test.ini")
if err != nil {
t.Fatal(err)
}
if originalOpenFiles != numFilesOpen(t) {
t.Error("test.ini not closed")
}
if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) {
t.Error("file not read correctly")
}
}
func numFilesOpen(t *testing.T) (num uint64) {
var rlimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)
if err != nil {
t.Fatal(err)
}
maxFds := int(rlimit.Cur)
var stat syscall.Stat_t
for i := 0; i < maxFds; i++ {
if syscall.Fstat(i, &stat) == nil {
num++
} else {
return
}
}
return
}

View file

@ -1,89 +0,0 @@
package ini
import (
"reflect"
"strings"
"testing"
)
func TestLoad(t *testing.T) {
src := `
# Comments are ignored
herp = derp
[foo]
hello=world
whitespace should = not matter
; sneaky semicolon-style comment
multiple = equals = signs
[bar]
this = that`
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
check := func(section, key, expect string) {
if value, _ := file.Get(section, key); value != expect {
t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value)
}
}
check("", "herp", "derp")
check("foo", "hello", "world")
check("foo", "whitespace should", "not matter")
check("foo", "multiple", "equals = signs")
check("bar", "this", "that")
}
func TestSyntaxError(t *testing.T) {
src := `
# Line 2
[foo]
bar = baz
# Here's an error on line 6:
wut?
herp = derp`
_, err := Load(strings.NewReader(src))
t.Logf("%T: %v", err, err)
if err == nil {
t.Fatal("expected an error, got nil")
}
syntaxErr, ok := err.(ErrSyntax)
if !ok {
t.Fatal("expected an error of type ErrSyntax")
}
if syntaxErr.Line != 6 {
t.Fatal("incorrect line number")
}
if syntaxErr.Source != "wut?" {
t.Fatal("incorrect source")
}
}
func TestDefinedSectionBehaviour(t *testing.T) {
check := func(src string, expect File) {
file, err := Load(strings.NewReader(src))
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(file, expect) {
t.Errorf("expected %v, got %v", expect, file)
}
}
// No sections for an empty file
check("", File{})
// Default section only if there are actually values for it
check("foo=bar", File{"": {"foo": "bar"}})
// User-defined sections should always be present, even if empty
check("[a]\n[b]\nfoo=bar", File{
"a": {},
"b": {"foo": "bar"},
})
check("foo=bar\n[a]\nthis=that", File{
"": {"foo": "bar"},
"a": {"this": "that"},
})
}

View file

@ -1,2 +0,0 @@
[default]
stuff = things

View file

@ -375,7 +375,7 @@ func TestAllocs(t *testing.T) {
<-c.Done()
},
limit: 8,
gccgoLimit: 13,
gccgoLimit: 15,
},
{
desc: "WithCancel(bg)",

View file

@ -0,0 +1,18 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package ctxhttp
import "net/http"
// canceler returns a function that cancels req. On Go 1.5+ this works
// by closing the channel assigned to req.Cancel; the client argument is
// unused but kept for symmetry with the pre-1.5 variant.
func canceler(client *http.Client, req *http.Request) func() {
	ch := make(chan struct{})
	req.Cancel = ch
	return func() {
		close(ch)
	}
}

View file

@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package ctxhttp
import "net/http"
// requestCanceler is implemented by transports (e.g. *http.Transport)
// that can abort an in-flight request.
type requestCanceler interface {
	CancelRequest(*http.Request)
}

// canceler returns a function that cancels req via the client's
// transport, or a no-op when the transport cannot cancel requests.
func canceler(client *http.Client, req *http.Request) func() {
	rc, ok := client.Transport.(requestCanceler)
	if !ok {
		return func() {}
	}
	return func() {
		rc.CancelRequest(req)
	}
}

View file

@ -0,0 +1,79 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp
import (
"io"
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
)
// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)
	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)
	go func() {
		resp, err := client.Do(req)
		result <- responseAndError{resp, err}
	}()
	select {
	case <-ctx.Done():
		cancel()
		// Previously the in-flight response was abandoned here, leaking
		// its body (and the underlying connection). Wait for client.Do
		// to return and close the body if one was produced.
		go func() {
			if r := <-result; r.resp != nil {
				r.resp.Body.Close()
			}
		}()
		return nil, ctx.Err()
	case r := <-result:
		return r.resp, r.err
	}
}
// Get issues a GET request via the Do function, so the request honors
// ctx's cancellation and deadline.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function, so the request honors
// ctx's cancellation and deadline.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}
// Post issues a POST request via the Do function, reading the request
// body from body and setting the Content-Type header to bodyType.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function with data encoded
// as an application/x-www-form-urlencoded body.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}

View file

@ -0,0 +1,72 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ctxhttp
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"golang.org/x/net/context"
)
const (
requestDuration = 100 * time.Millisecond
requestBody = "ok"
)
func TestNoTimeout(t *testing.T) {
ctx := context.Background()
resp, err := doRequest(ctx)
if resp == nil || err != nil {
t.Fatalf("error received from client: %v %v", err, resp)
}
}
func TestCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(requestDuration / 2)
cancel()
}()
resp, err := doRequest(ctx)
if resp != nil || err == nil {
t.Fatalf("expected error, didn't get one. resp: %v", resp)
}
if err != ctx.Err() {
t.Fatalf("expected error from context but got: %v", err)
}
}
func TestCancelAfterRequest(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
resp, err := doRequest(ctx)
// Cancel before reading the body.
// Request.Body should still be readable after the context is canceled.
cancel()
b, err := ioutil.ReadAll(resp.Body)
if err != nil || string(b) != requestBody {
t.Fatalf("could not read body: %q %v", b, err)
}
}
func doRequest(ctx context.Context) (*http.Response, error) {
var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(requestDuration)
w.Write([]byte(requestBody))
})
serv := httptest.NewServer(okHandler)
defer serv.Close()
return Get(ctx, nil, serv.URL)
}

View file

@ -1,9 +1,11 @@
package aws_test
import (
"github.com/mitchellh/goamz/aws"
. "github.com/motain/gocheck"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
func (S) TestAttemptTiming(c *C) {

268
Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws.go generated vendored Normal file
View file

@ -0,0 +1,268 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package aws
import (
"errors"
"os"
"strings"
)
// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
	Name                 string // the canonical name of this region.
	EC2Endpoint          string
	S3Endpoint           string
	S3BucketEndpoint     string // Not needed by AWS S3. Use ${bucket} for bucket name.
	S3LocationConstraint bool   // true if this region requires a LocationConstraint declaration.
	S3LowercaseBucket    bool   // true if the region requires bucket names to be lower case.
	SDBEndpoint          string // not all regions have SimpleDB; e.g. Frankfurt (eu-central-1) does not.
	SNSEndpoint          string
	SQSEndpoint          string
	IAMEndpoint          string
}
// ResolveS3BucketEndpoint returns the lowercased endpoint URL for the
// named bucket. When an explicit S3BucketEndpoint template is set,
// every "${bucket}" placeholder in it is substituted with bucketName;
// otherwise the bucket name is appended to the region's S3Endpoint.
func (r Region) ResolveS3BucketEndpoint(bucketName string) string {
	endpoint := r.S3BucketEndpoint
	if endpoint == "" {
		endpoint = r.S3Endpoint + "/" + bucketName + "/"
	} else {
		endpoint = strings.Replace(endpoint, "${bucket}", bucketName, -1)
	}
	return strings.ToLower(endpoint)
}
var USEast = Region{
"us-east-1", // US East (N. Virginia)
"https://ec2.us-east-1.amazonaws.com",
"https://s3.amazonaws.com",
"",
false,
false,
"https://sdb.amazonaws.com",
"https://sns.us-east-1.amazonaws.com",
"https://sqs.us-east-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var USWest = Region{
"us-west-1", //US West (N. California)
"https://ec2.us-west-1.amazonaws.com",
"https://s3-us-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-1.amazonaws.com",
"https://sns.us-west-1.amazonaws.com",
"https://sqs.us-west-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var USWest2 = Region{
"us-west-2", // US West (Oregon)
"https://ec2.us-west-2.amazonaws.com",
"https://s3-us-west-2.amazonaws.com",
"",
true,
true,
"https://sdb.us-west-2.amazonaws.com",
"https://sns.us-west-2.amazonaws.com",
"https://sqs.us-west-2.amazonaws.com",
"https://iam.amazonaws.com",
}
var USGovWest = Region{
"us-gov-west-1", // Isolated regions, AWS GovCloud (US)
"https://ec2.us-gov-west-1.amazonaws.com",
"https://s3-us-gov-west-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.us-gov-west-1.amazonaws.com",
"https://sqs.us-gov-west-1.amazonaws.com",
"https://iam.us-gov.amazonaws.com",
}
var EUWest = Region{
"eu-west-1", // EU (Ireland)
"https://ec2.eu-west-1.amazonaws.com",
"https://s3-eu-west-1.amazonaws.com",
"",
true,
true,
"https://sdb.eu-west-1.amazonaws.com",
"https://sns.eu-west-1.amazonaws.com",
"https://sqs.eu-west-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var EUCentral = Region{
"eu-central-1", // EU (Frankfurt)
"https://ec2.eu-central-1.amazonaws.com",
"https://s3-eu-central-1.amazonaws.com",
"",
true,
true,
"",
"https://sns.eu-central-1.amazonaws.com",
"https://sqs.eu-central-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var APSoutheast = Region{
"ap-southeast-1", // Asia Pacific (Singapore)
"https://ec2.ap-southeast-1.amazonaws.com",
"https://s3-ap-southeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-1.amazonaws.com",
"https://sns.ap-southeast-1.amazonaws.com",
"https://sqs.ap-southeast-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var APSoutheast2 = Region{
"ap-southeast-2", //Asia Pacific (Sydney)
"https://ec2.ap-southeast-2.amazonaws.com",
"https://s3-ap-southeast-2.amazonaws.com",
"",
true,
true,
"https://sdb.ap-southeast-2.amazonaws.com",
"https://sns.ap-southeast-2.amazonaws.com",
"https://sqs.ap-southeast-2.amazonaws.com",
"https://iam.amazonaws.com",
}
var APNortheast = Region{
"ap-northeast-1", //Asia Pacific (Tokyo)
"https://ec2.ap-northeast-1.amazonaws.com",
"https://s3-ap-northeast-1.amazonaws.com",
"",
true,
true,
"https://sdb.ap-northeast-1.amazonaws.com",
"https://sns.ap-northeast-1.amazonaws.com",
"https://sqs.ap-northeast-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var SAEast = Region{
"sa-east-1", // South America (Sao Paulo)
"https://ec2.sa-east-1.amazonaws.com",
"https://s3-sa-east-1.amazonaws.com",
"",
true,
true,
"https://sdb.sa-east-1.amazonaws.com",
"https://sns.sa-east-1.amazonaws.com",
"https://sqs.sa-east-1.amazonaws.com",
"https://iam.amazonaws.com",
}
var CNNorth = Region{
"cn-north-1", // Isolated regions, China (Beijing)
"https://ec2.cn-north-1.amazonaws.com.cn",
"https://s3.cn-north-1.amazonaws.com.cn",
"",
true,
true,
"https://sdb.cn-north-1.amazonaws.com.cn",
"https://sns.cn-north-1.amazonaws.com.cn",
"https://sqs.cn-north-1.amazonaws.com.cn",
"https://iam.cn-north-1.amazonaws.com.cn",
}
var Regions = map[string]Region{
APNortheast.Name: APNortheast,
APSoutheast.Name: APSoutheast,
APSoutheast2.Name: APSoutheast2,
EUWest.Name: EUWest,
EUCentral.Name: EUCentral,
USEast.Name: USEast,
USWest.Name: USWest,
USWest2.Name: USWest2,
USGovWest.Name: USGovWest,
SAEast.Name: SAEast,
CNNorth.Name: CNNorth,
}
type Auth struct {
AccessKey, SecretKey string
}
var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"
func init() {
// RFC3986
u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
for _, c := range u {
unreserved[c] = true
}
}
// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used as the first preference, but EC2_ACCESS_KEY
// and EC2_SECRET_KEY or AWS_ACCESS_KEY and AWS_SECRET_KEY
// environment variables are also supported.
func EnvAuth() (auth Auth, err error) {
	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")

	// First fall back to the EC2_* environment variables.
	if auth.AccessKey == "" && auth.SecretKey == "" {
		auth.AccessKey = os.Getenv("EC2_ACCESS_KEY")
		auth.SecretKey = os.Getenv("EC2_SECRET_KEY")
	}

	// Second fall back to the legacy AWS_* environment variables.
	if auth.AccessKey == "" && auth.SecretKey == "" {
		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
	}

	// Report whichever half of the credential pair is still missing;
	// when both are missing the secret-key error wins (assigned last).
	if auth.AccessKey == "" {
		err = errors.New("AWS_ACCESS_KEY_ID not found in environment")
	}
	if auth.SecretKey == "" {
		err = errors.New("AWS_SECRET_ACCESS_KEY not found in environment")
	}
	return
}
// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures: every byte outside the RFC 3986
// unreserved set (and every byte above ASCII) is percent-encoded
// using uppercase hexadecimal digits.
func Encode(s string) string {
	needsEscape := func(c byte) bool {
		return c > 127 || !unreserved[c]
	}

	// Fast path: return the input untouched when nothing needs escaping.
	clean := true
	for i := 0; i < len(s); i++ {
		if needsEscape(s[i]) {
			clean = false
			break
		}
	}
	if clean {
		return s
	}

	// Worst case every byte expands to "%XX" (three bytes).
	out := make([]byte, 0, len(s)*3)
	for i := 0; i < len(s); i++ {
		c := s[i]
		if needsEscape(c) {
			out = append(out, '%', hex[c>>4], hex[c&0xF])
		} else {
			out = append(out, c)
		}
	}
	return string(out)
}

84
Godeps/_workspace/src/gopkg.in/amz.v3/aws/aws_test.go generated vendored Normal file
View file

@ -0,0 +1,84 @@
package aws_test
import (
"os"
"strings"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
)
func Test(t *testing.T) {
TestingT(t)
}
var _ = Suite(&S{})
type S struct {
environ []string
}
func (s *S) SetUpSuite(c *C) {
s.environ = os.Environ()
}
func (s *S) TearDownTest(c *C) {
os.Clearenv()
for _, kv := range s.environ {
l := strings.SplitN(kv, "=", 2)
os.Setenv(l[0], l[1])
}
}
func (s *S) TestEnvAuthNoSecret(c *C) {
os.Clearenv()
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_SECRET_ACCESS_KEY not found in environment")
}
func (s *S) TestEnvAuthNoAccess(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "foo")
_, err := aws.EnvAuth()
c.Assert(err, ErrorMatches, "AWS_ACCESS_KEY_ID not found in environment")
}
func (s *S) TestEnvAuth(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_ACCESS_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY_ID", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthLegacy(c *C) {
os.Clearenv()
os.Setenv("EC2_SECRET_KEY", "secret")
os.Setenv("EC2_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEnvAuthAws(c *C) {
os.Clearenv()
os.Setenv("AWS_SECRET_KEY", "secret")
os.Setenv("AWS_ACCESS_KEY", "access")
auth, err := aws.EnvAuth()
c.Assert(err, IsNil)
c.Assert(auth, Equals, aws.Auth{SecretKey: "secret", AccessKey: "access"})
}
func (s *S) TestEncode(c *C) {
c.Assert(aws.Encode("foo"), Equals, "foo")
c.Assert(aws.Encode("/"), Equals, "%2F")
}
func (s *S) TestRegionsAreNamed(c *C) {
for n, r := range aws.Regions {
c.Assert(n, Equals, r.Name)
}
}

447
Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign.go generated vendored Normal file
View file

@ -0,0 +1,447 @@
package aws
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"sort"
"strings"
"time"
)
var debug = log.New(
// Remove the c-style comment header to front of line to debug information.
/*os.Stdout, //*/ ioutil.Discard,
"DEBUG: ",
log.LstdFlags,
)
type Signer func(*http.Request, Auth) error
// Ensure our signers meet the interface
var _ Signer = SignV2
var _ Signer = SignV4Factory("", "")
type hasher func(io.Reader) (string, error)
const (
ISO8601BasicFormat = "20060102T150405Z"
ISO8601BasicFormatShort = "20060102"
)
// SignV2 signs an HTTP request utilizing version 2 of the AWS
// signature, and the given credentials.
//
// The access key, signature version and method are first added to the
// request's query string. A canonical payload of the form
// "METHOD\nHOST\nURI\nQUERY" is then HMAC-SHA256'd with the secret
// key, and the base64-encoded signature is written back into the
// query string as "Signature".
func SignV2(req *http.Request, auth Auth) (err error) {
	queryVals := req.URL.Query()
	queryVals.Set("AWSAccessKeyId", auth.AccessKey)
	queryVals.Set("SignatureVersion", "2")
	queryVals.Set("SignatureMethod", "HmacSHA256")

	uriStr := canonicalURI(req.URL)
	queryStr := canonicalQueryString(queryVals)

	payload := new(bytes.Buffer)
	if err := errorCollector(
		fprintfWrapper(payload, "%s\n", requestMethodVerb(req.Method)),
		fprintfWrapper(payload, "%s\n", req.Host),
		fprintfWrapper(payload, "%s\n", uriStr),
		fprintfWrapper(payload, "%s", queryStr),
	); err != nil {
		return err
	}

	hash := hmac.New(sha256.New, []byte(auth.SecretKey))
	hash.Write(payload.Bytes())
	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
	base64.StdEncoding.Encode(signature, hash.Sum(nil))

	queryVals.Set("Signature", string(signature))
	req.URL.RawQuery = queryVals.Encode()

	return nil
}
// SignV4Factory returns a version 4 Signer which will utilize the
// given region and service names for every request it signs.
func SignV4Factory(regionName, serviceName string) Signer {
	signer := func(req *http.Request, auth Auth) error {
		return SignV4(req, auth, regionName, serviceName)
	}
	return signer
}
// SignV4URL produces a version 4 "presigned URL" signature: instead of
// an Authorization header, the algorithm, credential scope, request
// time, expiry and signature are all carried in the URL's query
// string, so the URL itself grants access for the given duration.
//
// The request must already carry a parseable "date" or "x-amz-date"
// header from which the signing time is taken; the "date" header is
// removed afterwards, since only the X-Amz-Date query value
// participates in a presigned request.
func SignV4URL(req *http.Request, auth Auth, regionName, svcName string, expires time.Duration) error {
	reqTime, err := requestTime(req)
	if err != nil {
		return err
	}

	req.Header.Del("date")

	credScope := credentialScope(reqTime, regionName, svcName)

	queryVals := req.URL.Query()
	queryVals.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
	queryVals.Set("X-Amz-Credential", auth.AccessKey+"/"+credScope)
	queryVals.Set("X-Amz-Date", reqTime.Format(ISO8601BasicFormat))
	queryVals.Set("X-Amz-Expires", fmt.Sprintf("%d", int(expires.Seconds())))
	queryVals.Set("X-Amz-SignedHeaders", "host")
	req.URL.RawQuery = queryVals.Encode()

	// calcPayHash=false: presigned URLs use the UNSIGNED-PAYLOAD
	// placeholder instead of hashing the body.
	_, canonReqHash, _, err := canonicalRequest(req, sha256Hasher, false)
	if err != nil {
		return err
	}

	var strToSign string
	if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
		return err
	}

	key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
	signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))

	debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)

	queryVals.Set("X-Amz-Signature", signature)
	req.URL.RawQuery = queryVals.Encode()

	return nil
}
// SignV4 signs an HTTP request utilizing version 4 of the AWS
// signature, and the given credentials.
//
// The request must already carry a parseable "date" or "x-amz-date"
// header. The payload hash is recorded in the "x-amz-content-sha256"
// header (via canonicalRequest) and the final signature is placed in
// the "Authorization" header.
func SignV4(req *http.Request, auth Auth, regionName, svcName string) (err error) {
	var reqTime time.Time
	if reqTime, err = requestTime(req); err != nil {
		return err
	}

	// Remove any existing authorization headers as they will corrupt
	// the signing.
	delete(req.Header, "Authorization")
	delete(req.Header, "authorization")

	credScope := credentialScope(reqTime, regionName, svcName)

	_, canonReqHash, sortedHdrNames, err := canonicalRequest(req, sha256Hasher, true)
	if err != nil {
		return err
	}

	var strToSign string
	if strToSign, err = stringToSign(reqTime, canonReqHash, credScope); err != nil {
		return err
	}

	key := signingKey(reqTime, auth.SecretKey, regionName, svcName)
	signature := fmt.Sprintf("%x", hmacHasher(key, strToSign))

	debug.Printf("strToSign:\n\"\"\"\n%s\n\"\"\"", strToSign)

	var authHdrVal string
	if authHdrVal, err = authHeaderString(
		req.Header,
		auth.AccessKey,
		signature,
		credScope,
		sortedHdrNames,
	); err != nil {
		return err
	}

	req.Header.Set("Authorization", authHdrVal)

	return nil
}
// Task 1: Create a Canonical Request.
// Returns the canonical request, and its hash.
//
// When calcPayHash is true the request body is hashed and the result
// is also stored in the "x-amz-content-sha256" header; otherwise the
// literal "UNSIGNED-PAYLOAD" placeholder is used (presigned URLs).
//
// NOTE(review): hashing reads req.Body without rewinding it, so the
// caller appears responsible for re-seeking or replacing the body
// before the request is actually sent — confirm against callers.
func canonicalRequest(
	req *http.Request,
	hasher hasher,
	calcPayHash bool,
) (canReq, canReqHash string, sortedHdrNames []string, err error) {
	payHash := "UNSIGNED-PAYLOAD"
	if calcPayHash {
		if payHash, err = payloadHash(req, hasher); err != nil {
			return
		}
		req.Header.Set("x-amz-content-sha256", payHash)
	}

	sortedHdrNames = sortHeaderNames(req.Header, "host")
	var canHdr string
	if canHdr, err = canonicalHeaders(sortedHdrNames, req.Host, req.Header); err != nil {
		return
	}

	debug.Printf("canHdr:\n\"\"\"\n%s\n\"\"\"", canHdr)
	debug.Printf("signedHeader: %s\n\n", strings.Join(sortedHdrNames, ";"))

	uriStr := canonicalURI(req.URL)
	queryStr := canonicalQueryString(req.URL.Query())

	// Canonical request layout: method, URI, query, headers, signed
	// header list, then the payload hash.
	c := new(bytes.Buffer)
	if err := errorCollector(
		fprintfWrapper(c, "%s\n", requestMethodVerb(req.Method)),
		fprintfWrapper(c, "%s\n", uriStr),
		fprintfWrapper(c, "%s\n", queryStr),
		fprintfWrapper(c, "%s\n", canHdr),
		fprintfWrapper(c, "%s\n", strings.Join(sortedHdrNames, ";")),
		fprintfWrapper(c, "%s", payHash),
	); err != nil {
		return "", "", nil, err
	}

	canReq = c.String()
	debug.Printf("canReq:\n\"\"\"\n%s\n\"\"\"", canReq)

	canReqHash, err = hasher(bytes.NewBuffer([]byte(canReq)))

	return canReq, canReqHash, sortedHdrNames, err
}
// Task 2: Create a string to Sign
// Returns a string in the defined format to sign for the authorization header.
//
// The format is four newline-separated fields: the fixed algorithm
// name "AWS4-HMAC-SHA256", the request time in ISO 8601 basic format,
// the credential scope, and the hex hash of the canonical request.
func stringToSign(
	t time.Time,
	canonReqHash string,
	credScope string,
) (string, error) {
	w := new(bytes.Buffer)
	if err := errorCollector(
		fprintfWrapper(w, "AWS4-HMAC-SHA256\n"),
		fprintfWrapper(w, "%s\n", t.Format(ISO8601BasicFormat)),
		fprintfWrapper(w, "%s\n", credScope),
		fprintfWrapper(w, "%s", canonReqHash),
	); err != nil {
		return "", err
	}
	return w.String(), nil
}
// Task 3: Calculate the Signature
// Returns a derived signing key.
//
// The key is built by chained HMAC-SHA256 over the short-format date,
// region, service and the literal "aws4_request", seeded with
// "AWS4" + the secret key.
func signingKey(t time.Time, secretKey, regionName, svcName string) []byte {
	kSecret := secretKey
	kDate := hmacHasher([]byte("AWS4"+kSecret), t.Format(ISO8601BasicFormatShort))
	kRegion := hmacHasher(kDate, regionName)
	kService := hmacHasher(kRegion, svcName)
	kSigning := hmacHasher(kService, "aws4_request")
	return kSigning
}
// Task 4: Add the Signing Information to the Request
// Returns a string to be placed in the Authorization header for the request.
//
// The header parameter is accepted for symmetry with the other signing
// tasks but is not consulted here.
func authHeaderString(
	header http.Header,
	accessKey,
	signature string,
	credScope string,
	sortedHeaderNames []string,
) (string, error) {
	value := fmt.Sprintf(
		"AWS4-HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		accessKey,
		credScope,
		strings.Join(sortedHeaderNames, ";"),
		signature,
	)
	return value, nil
}
func canonicalURI(u *url.URL) string {
// The algorithm states that if the path is empty, to just use a "/".
if u.Path == "" {
return "/"
}
// Each path segment must be URI-encoded.
segments := strings.Split(u.Path, "/")
for i, segment := range segments {
segments[i] = goToAwsUrlEncoding(url.QueryEscape(segment))
}
return strings.Join(segments, "/")
}
func canonicalQueryString(queryVals url.Values) string {
// AWS dictates that if duplicate keys exist, their values be
// sorted as well.
for _, values := range queryVals {
sort.Strings(values)
}
return goToAwsUrlEncoding(queryVals.Encode())
}
// goToAwsUrlEncoding converts Go's standard URL encoding to the AWS
// flavor: AWS dictates %20 for encoding spaces rather than "+". All
// significant +s must already be percent-encoded before calling this.
func goToAwsUrlEncoding(urlEncoded string) string {
	const awsSpace = "%20"
	return strings.Replace(urlEncoded, "+", awsSpace, -1)
}
func canonicalHeaders(sortedHeaderNames []string, host string, hdr http.Header) (string, error) {
buffer := new(bytes.Buffer)
for _, hName := range sortedHeaderNames {
hdrVals := host
if hName != "host" {
canonHdrKey := http.CanonicalHeaderKey(hName)
sortedHdrVals := hdr[canonHdrKey]
sort.Strings(sortedHdrVals)
hdrVals = strings.Join(sortedHdrVals, ",")
}
if _, err := fmt.Fprintf(buffer, "%s:%s\n", hName, hdrVals); err != nil {
return "", err
}
}
// There is intentionally a hanging newline at the end of the
// header list.
return buffer.String(), nil
}
// Returns a SHA256 checksum of the request body. Represented as a
// lowercase hexadecimal string. A nil body is hashed as an empty
// payload.
//
// NOTE(review): this consumes req.Body; callers must rewind or replace
// the body before the request is sent — confirm against callers.
func payloadHash(req *http.Request, hasher hasher) (string, error) {
	var body io.Reader = req.Body
	if body == nil {
		body = bytes.NewBuffer(nil)
	}
	return hasher(body)
}
// Retrieve the header names, lower-case them, and sort them.
func sortHeaderNames(header http.Header, injectedNames ...string) []string {
sortedNames := injectedNames
for hName, _ := range header {
sortedNames = append(sortedNames, strings.ToLower(hName))
}
sort.Strings(sortedNames)
return sortedNames
}
func hmacHasher(key []byte, value string) []byte {
h := hmac.New(sha256.New, key)
h.Write([]byte(value))
return h.Sum(nil)
}
func sha256Hasher(payloadReader io.Reader) (string, error) {
hasher := sha256.New()
_, err := io.Copy(hasher, payloadReader)
return fmt.Sprintf("%x", hasher.Sum(nil)), err
}
func credentialScope(t time.Time, regionName, svcName string) string {
return fmt.Sprintf(
"%s/%s/%s/aws4_request",
t.Format(ISO8601BasicFormatShort),
regionName,
svcName,
)
}
// We do a lot of fmt.Fprintfs in this package. Create a higher-order
// function to elide the bytes written return value so we can submit
// these calls to an error collector.
func fprintfWrapper(w io.Writer, format string, vals ...interface{}) func() error {
return func() error {
_, err := fmt.Fprintf(w, format, vals...)
return err
}
}
// Poor man's maybe monad.
func errorCollector(writers ...func() error) error {
for _, writer := range writers {
if err := writer(); err != nil {
return err
}
}
return nil
}
// Retrieve the request time from the request. We will attempt to
// parse whatever we find, but we will not make up a request date for
// the user (i.e.: Magic!).
//
// The "x-amz-date" header takes precedence over "date"; if neither is
// present an error is returned.
func requestTime(req *http.Request) (time.Time, error) {

	// Time formats to try. We want to do everything we can to accept
	// all time formats, but ultimately we may fail. (Note: despite
	// earlier intent, this slice is function-local and rebuilt on
	// every call; hoist it to package scope if it ever matters.)
	var timeFormats = []string{
		time.RFC822,
		ISO8601BasicFormat,
		time.RFC1123,
		time.ANSIC,
		time.UnixDate,
		time.RubyDate,
		time.RFC822Z,
		time.RFC850,
		time.RFC1123Z,
		time.RFC3339,
		time.RFC3339Nano,
		time.Kitchen,
	}

	// Get a date header.
	var date string
	if date = req.Header.Get("x-amz-date"); date == "" {
		if date = req.Header.Get("date"); date == "" {
			return time.Time{}, fmt.Errorf(`Could not retrieve a request date. Please provide one in either "x-amz-date", or "date".`)
		}
	}

	// Start attempting to parse
	for _, format := range timeFormats {
		if parsedTime, err := time.Parse(format, date); err == nil {
			return parsedTime, nil
		}
	}

	return time.Time{}, fmt.Errorf(
		"Could not parse the given date. Please utilize one of the following formats: %s",
		strings.Join(timeFormats, ","),
	)
}
// requestMethodVerb derives the bare HTTP verb ("GET", "PUT", ...)
// from an http.Request Method value, which may be a full request line
// such as "POST / http/1.1".
//
// Per the http.Request documentation an empty Method means GET. The
// previous implementation tested len(verbPlus) == 0 for this, but
// strings.SplitN never returns an empty slice, so that branch was
// unreachable and an empty method leaked through as "".
func requestMethodVerb(rawMethod string) (verb string) {
	if rawMethod == "" {
		// Per docs, Method will be empty if it's GET.
		return "GET"
	}
	return strings.SplitN(rawMethod, " ", 2)[0]
}

285
Godeps/_workspace/src/gopkg.in/amz.v3/aws/sign_test.go generated vendored Normal file
View file

@ -0,0 +1,285 @@
package aws
import (
"bytes"
"fmt"
"net/http"
"time"
. "gopkg.in/check.v1"
)
var _ = Suite(&SigningSuite{})
type SigningSuite struct{}
// TODO(katco-): The signing methodology is a "one size fits all"
// approach. The hashes we check against don't include headers that
// are added in as requisite parts for S3. That doesn't mean the tests
// are invalid, or that signing is broken for these examples, but as
// long as we're adding heads in, it's impossible to know what the new
// signature should be. We should revaluate these later. See:
// https://github.com/go-amz/amz/issues/29
const v4skipReason = `Extra headers present - cannot predict generated signature (issue #29).`
// EC2 ReST authentication docs: http://goo.gl/fQmAN
var testAuth = Auth{"user", "secret"}
func (s *SigningSuite) TestV4SignedUrl(c *C) {
auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
c.Assert(err, IsNil)
err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
c.Assert(err, IsNil)
c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/test.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=aeeed9bbccd4d02ee5c0109b86d86835f995330da4c265957d157751f604d404&X-Amz-SignedHeaders=host")
}
func (s *SigningSuite) TestV4SignedUrlReserved(c *C) {
auth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some:reserved,characters", nil)
req.Header.Add("date", "Fri, 24 May 2013 00:00:00 GMT")
c.Assert(err, IsNil)
err = SignV4URL(req, auth, USEast.Name, "s3", 86400*time.Second)
c.Assert(err, IsNil)
c.Check(req.URL.String(), Equals, "https://examplebucket.s3.amazonaws.com/some:reserved,characters?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIOSFODNN7EXAMPLE%2F20130524%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20130524T000000Z&X-Amz-Expires=86400&X-Amz-Signature=ac81e03593d6fc22ac045b9353b0242da755be2af80b981eb13917d8b9cf20a4&X-Amz-SignedHeaders=host")
}
func (s *SigningSuite) TestV4StringToSign(c *C) {
mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
c.Assert(err, IsNil)
stringToSign, err := stringToSign(
mockTime,
"3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2",
"20110909/us-east-1/iam/aws4_request",
)
c.Assert(err, IsNil)
const expected = `AWS4-HMAC-SHA256
20110909T233600Z
20110909/us-east-1/iam/aws4_request
3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2`
c.Assert(stringToSign, Equals, expected)
}
func (s *SigningSuite) TestV4CanonicalRequest(c *C) {
c.Skip(v4skipReason)
body := new(bytes.Buffer)
_, err := fmt.Fprint(body, "Action=ListUsers&Version=2010-05-08")
c.Assert(err, IsNil)
req, err := http.NewRequest("POST", "https://iam.amazonaws.com", body)
c.Assert(err, IsNil)
req.Header.Add("content-type", "application/x-www-form-urlencoded; charset=utf-8")
req.Header.Add("host", req.URL.Host)
req.Header.Add("x-amz-date", "20110909T233600Z")
canonReq, canonReqHash, _, err := canonicalRequest(
req,
sha256Hasher,
true,
)
c.Assert(err, IsNil)
const expected = `POST
/
content-type:application/x-www-form-urlencoded; charset=utf-8
host:iam.amazonaws.com
x-amz-date:20110909T233600Z
content-type;host;x-amz-date
b6359072c78d70ebee1e81adcbab4f01bf2c23245fa365ef83fe8f1f955085e2`
c.Assert(canonReq, Equals, expected)
c.Assert(canonReqHash, Equals, "3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2")
}
func (s *SigningSuite) TestV4SigningKey(c *C) {
c.Skip(v4skipReason)
mockTime, err := time.Parse(time.RFC3339, "2011-09-09T23:36:00Z")
c.Assert(err, IsNil)
c.Assert(
fmt.Sprintf("%v", signingKey(mockTime, testAuth.SecretKey, USEast.Name, "iam")),
Equals,
"[152 241 216 137 254 196 244 66 26 220 82 43 171 12 225 248 46 105 41 194 98 237 21 229 169 76 144 239 209 227 176 231]")
}
func (s *SigningSuite) TestV4BasicSignatureV4(c *C) {
c.Skip(v4skipReason)
body := new(bytes.Buffer)
req, err := http.NewRequest("POST / http/1.1", "https://host.foo.com", body)
c.Assert(err, IsNil)
req.Header.Add("Host", req.URL.Host)
req.Header.Add("Date", "Mon, 09 Sep 2011 23:36:00 GMT")
testAuth := Auth{
AccessKey: "AKIDEXAMPLE",
SecretKey: "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY",
}
err = SignV4(req, testAuth, USEast.Name, "host")
c.Assert(err, IsNil)
c.Assert(req.Header.Get("Authorization"), Equals, `AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request,SignedHeaders=date;host,Signature=22902d79e148b64e7571c3565769328423fe276eae4b26f83afceda9e767f726`)
}
func (s *SigningSuite) TestV4MoreCompleteSignature(c *C) {
req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/test.txt", nil)
c.Assert(err, IsNil)
req.Header.Set("x-amz-date", "20130524T000000Z")
req.Header.Set("Range", "bytes=0-9")
testAuth := Auth{
AccessKey: "AKIAIOSFODNN7EXAMPLE",
SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
}
err = SignV4(req, testAuth, USEast.Name, "s3")
c.Assert(err, IsNil)
c.Check(req.Header.Get("Authorization"), Equals, "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;range;x-amz-content-sha256;x-amz-date, Signature=f0e8bdb87c964420e857bd35b5d6ed310bd44f0170aba48dd91039c6036bdb41")
}
//
// v2 Tests
//
func (s *SigningSuite) TestV2BasicSignature(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
SignV2(req, testAuth)
query := req.URL.Query()
c.Assert(query.Get("SignatureVersion"), Equals, "2")
c.Assert(query.Get("SignatureMethod"), Equals, "HmacSHA256")
expected := "6lSe5QyXum0jMVc7cOUz32/52ZnL7N5RyKRk/09yiK4="
c.Assert(query.Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2ParamSignature(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
for i := 1; i <= 3; i++ {
query.Add(fmt.Sprintf("param%d", i), fmt.Sprintf("value%d", i))
}
req.URL.RawQuery = query.Encode()
SignV2(req, testAuth)
expected := "XWOR4+0lmK8bD8CGDGZ4kfuSPbb2JibLJiCl/OPu1oU="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2ManyParams(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
orderedVals := []int{10, 2, 3, 4, 5, 6, 7, 8, 9, 1}
for i, val := range orderedVals {
query.Add(fmt.Sprintf("param%d", i+1), fmt.Sprintf("value%d", val))
}
req.URL.RawQuery = query.Encode()
SignV2(req, testAuth)
expected := "di0sjxIvezUgQ1SIL6i+C/H8lL+U0CQ9frLIak8jkVg="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2Escaping(c *C) {
req, err := http.NewRequest("GET", "http://localhost/path", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Nonce", "+ +")
req.URL.RawQuery = query.Encode()
err = SignV2(req, testAuth)
c.Assert(err, IsNil)
query = req.URL.Query()
c.Assert(query.Get("Nonce"), Equals, "+ +")
expected := "bqffDELReIqwjg/W0DnsnVUmfLK4wXVLO4/LuG+1VFA="
c.Assert(query.Get("Signature"), Equals, expected)
}
func (s *SigningSuite) TestV2SignatureExample1(c *C) {
req, err := http.NewRequest("GET", "http://sdb.amazonaws.com/", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Timestamp", "2009-02-01T12:53:20+00:00")
query.Add("Version", "2007-11-07")
query.Add("Action", "ListDomains")
req.URL.RawQuery = query.Encode()
SignV2(req, Auth{"access", "secret"})
expected := "okj96/5ucWBSc1uR2zXVfm6mDHtgfNv657rRtt/aunQ="
c.Assert(req.URL.Query().Get("Signature"), Equals, expected)
}
// Tests example from:
// http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
// Specifically, good for testing case when URL does not contain a /
func (s *SigningSuite) TestV2SignatureTutorialExample(c *C) {
req, err := http.NewRequest("GET", "https://elasticmapreduce.amazonaws.com/", nil)
c.Assert(err, IsNil)
query := req.URL.Query()
query.Add("Timestamp", "2011-10-03T15:19:30")
query.Add("Version", "2009-03-31")
query.Add("Action", "DescribeJobFlows")
req.URL.RawQuery = query.Encode()
testAuth := Auth{"AKIAIOSFODNN7EXAMPLE", "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"}
err = SignV2(req, testAuth)
c.Assert(err, IsNil)
c.Assert(req.URL.Query().Get("Signature"), Equals, "i91nKc4PWAt0JJIdXwz9HxZCJDdiy6cf/Mj6vPxyYIs=")
}
// https://bugs.launchpad.net/goamz/+bug/1022749
func (s *SigningSuite) TestSignatureWithEndpointPath(c *C) {
req, err := http.NewRequest("GET", "http://localhost:4444/services/Cloud", nil)
c.Assert(err, IsNil)
queryStr := req.URL.Query()
queryStr.Add("Action", "RebootInstances")
queryStr.Add("Version", "2011-12-15")
queryStr.Add("InstanceId.1", "i-10a64379")
queryStr.Add("Timestamp", time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).In(time.UTC).Format(time.RFC3339))
req.URL.RawQuery = queryStr.Encode()
err = SignV2(req, Auth{"abc", "123"})
c.Assert(err, IsNil)
c.Assert(req.URL.Query().Get("Signature"), Equals, "gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M=")
err = req.ParseForm()
c.Assert(err, IsNil)
c.Assert(req.Form["Signature"], DeepEquals, []string{"gdG/vEm+c6ehhhfkrJy3+wuVzw/rzKR42TYelMwti7M="})
}

View file

@ -1,11 +1,17 @@
package s3
import (
"github.com/mitchellh/goamz/aws"
"net/http"
"gopkg.in/amz.v3/aws"
)
var originalStrategy = attempts
func BuildError(resp *http.Response) error {
return buildError(resp)
}
func SetAttemptStrategy(s *aws.AttemptStrategy) {
if s == nil {
attempts = originalStrategy
@ -14,8 +20,8 @@ func SetAttemptStrategy(s *aws.AttemptStrategy) {
}
}
func Sign(auth aws.Auth, method, path string, params, headers map[string][]string) {
sign(auth, method, path, params, headers)
func AttemptStrategy() aws.AttemptStrategy {
return attempts
}
func SetListPartsMax(n int) {

View file

@ -8,6 +8,7 @@ import (
"encoding/xml"
"errors"
"io"
"net/http"
"sort"
"strconv"
)
@ -49,39 +50,66 @@ type listMultiResp struct {
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
params := map[string][]string{
"uploads": {""},
"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
"prefix": {prefix},
"delimiter": {delim},
req, err := http.NewRequest("GET", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, nil, err
}
query := req.URL.Query()
query.Add("uploads", "")
query.Add("max-uploads", strconv.FormatInt(int64(listMultiMax), 10))
query.Add("prefix", prefix)
query.Add("delimiter", delim)
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
// We need to resign every iteration because we're changing variables.
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, nil, err
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: b.Name,
params: params,
}
var resp listMultiResp
err := b.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
resp, err := requestRetryLoop(req, attempts)
if err == nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
}
if err != nil {
if shouldRetry(err) && attempt.HasNext() {
continue
}
return nil, nil, err
}
for i := range resp.Upload {
multi := &resp.Upload[i]
var multiResp listMultiResp
if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil {
return nil, nil, err
}
resp.Body.Close()
for i := range multiResp.Upload {
multi := &multiResp.Upload[i]
multi.Bucket = b
multis = append(multis, multi)
}
prefixes = append(prefixes, resp.CommonPrefixes...)
if !resp.IsTruncated {
prefixes = append(prefixes, multiResp.CommonPrefixes...)
if !multiResp.IsTruncated {
return multis, prefixes, nil
}
params["key-marker"] = []string{resp.NextKeyMarker}
params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
attempt = attempts.Start() // Last request worked.
query := req.URL.Query()
query.Set("key-marker", multiResp.NextKeyMarker)
query.Set("upload-id-marker", multiResp.NextUploadIdMarker)
req.URL.RawQuery = query.Encode()
// Last request worked; restart our counter.
attempt = attempts.Start()
}
panic("unreachable")
}
@ -106,35 +134,44 @@ func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) {
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) {
headers := map[string][]string{
"Content-Type": {contType},
"Content-Length": {"0"},
"x-amz-acl": {string(perm)},
req, err := http.NewRequest("POST", b.Region.ResolveS3BucketEndpoint(b.Name), nil)
if err != nil {
return nil, err
}
params := map[string][]string{
"uploads": {""},
req.URL.Path += key
query := req.URL.Query()
query.Add("uploads", "")
req.URL.RawQuery = query.Encode()
req.Header.Add("Content-Type", contType)
req.Header.Add("Content-Length", "0")
req.Header.Add("x-amz-acl", string(perm))
addAmazonDateHeader(req.Header)
if err := b.S3.Sign(req, b.Auth); err != nil {
return nil, err
}
req := &request{
method: "POST",
bucket: b.Name,
path: key,
headers: headers,
params: params,
}
var err error
var resp struct {
UploadId string `xml:"UploadId"`
}
for attempt := attempts.Start(); attempt.Next(); {
err = b.S3.query(req, &resp)
if !shouldRetry(err) {
break
resp, err := requestRetryLoop(req, attempts)
if err == nil {
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
}
if err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
var multiResp struct {
UploadId string `xml:"UploadId"`
}
if err := xml.NewDecoder(resp.Body).Decode(&multiResp); err != nil {
return nil, err
}
return &Multi{Bucket: b, Key: key, UploadId: multiResp.UploadId}, nil
}
// PutPart sends part n of the multipart upload, reading all the content from r.
@ -150,45 +187,51 @@ func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
headers := map[string][]string{
"Content-Length": {strconv.FormatInt(partSize, 10)},
"Content-MD5": {md5b64},
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
params := map[string][]string{
"uploadId": {m.UploadId},
"partNumber": {strconv.FormatInt(int64(n), 10)},
req, err := http.NewRequest("PUT", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), r)
if err != nil {
return Part{}, err
}
for attempt := attempts.Start(); attempt.Next(); {
_, err := r.Seek(0, 0)
if err != nil {
return Part{}, err
}
req := &request{
method: "PUT",
bucket: m.Bucket.Name,
path: m.Key,
headers: headers,
params: params,
payload: r,
}
err = m.Bucket.S3.prepare(req)
if err != nil {
return Part{}, err
}
resp, err := m.Bucket.S3.run(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
return Part{}, err
}
etag := resp.Header.Get("ETag")
if etag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return Part{n, etag, partSize}, nil
req.Close = true
req.URL.Path += m.Key
req.ContentLength = partSize
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
query.Add("partNumber", strconv.FormatInt(int64(n), 10))
req.URL.RawQuery = query.Encode()
req.Header.Add("Content-MD5", md5b64)
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return Part{}, err
}
panic("unreachable")
// Signing may read the request body.
if _, err := r.Seek(0, 0); err != nil {
return Part{}, err
}
resp, err := requestRetryLoop(req, attempts)
defer resp.Body.Close()
if err != nil {
return Part{}, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return Part{}, buildError(resp)
}
part := Part{n, resp.Header.Get("ETag"), partSize}
if part.ETag == "" {
return Part{}, errors.New("part upload succeeded with no ETag")
}
return part, nil
}
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
@ -233,35 +276,62 @@ var listPartsMax = 1000
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListParts() ([]Part, error) {
params := map[string][]string{
"uploadId": {m.UploadId},
"max-parts": {strconv.FormatInt(int64(listPartsMax), 10)},
req, err := http.NewRequest("GET", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
if err != nil {
return nil, err
}
req.Close = true
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
query.Add("max-parts", strconv.FormatInt(int64(listPartsMax), 10))
req.URL.RawQuery = query.Encode()
var parts partSlice
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "GET",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
var resp listPartsResp
err := m.Bucket.S3.query(req, &resp)
if shouldRetry(err) && attempt.HasNext() {
continue
}
if err != nil {
addAmazonDateHeader(req.Header)
// We need to resign every iteration because we're changing the URL.
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return nil, err
}
parts = append(parts, resp.Part...)
if !resp.IsTruncated {
sort.Sort(parts)
return parts, nil
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
err = buildError(resp)
}
params["part-number-marker"] = []string{resp.NextPartNumberMarker}
attempt = attempts.Start() // Last request worked.
if err != nil {
if shouldRetry(err) && attempt.HasNext() {
continue
}
return nil, err
}
var listResp listPartsResp
if err := xml.NewDecoder(resp.Body).Decode(&listResp); err != nil {
return nil, err
}
parts = append(parts, listResp.Part...)
if listResp.IsTruncated == false {
break
}
query.Set("part-number-marker", listResp.NextPartNumberMarker)
req.URL.RawQuery = query.Encode()
// Last request worked; restart our counter.
attempt = attempts.Start()
}
panic("unreachable")
sort.Sort(parts)
return parts, nil
}
type ReaderAtSeeker interface {
@ -280,6 +350,7 @@ func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
if err != nil && !hasCode(err, "NoSuchUpload") {
return nil, err
}
reuse := 0 // Index of next old part to consider reusing.
current := 1 // Part number of latest good part handled.
totalSize, err := r.Seek(0, 2)
@ -344,33 +415,54 @@ func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
params := map[string][]string{
"uploadId": {m.UploadId},
}
c := completeUpload{}
var c completeUpload
for _, p := range parts {
c.Parts = append(c.Parts, completePart{p.N, p.ETag})
}
sort.Sort(c.Parts)
data, err := xml.Marshal(&c)
if err != nil {
return err
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "POST",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
payload: bytes.NewReader(data),
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
body := bytes.NewReader(data)
req, err := http.NewRequest(
"POST",
m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name),
body,
)
if err != nil {
return err
}
panic("unreachable")
req.Close = true
req.ContentLength = int64(len(data))
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return err
}
// Signing may read the request body.
if _, err := body.Seek(0, 0); err != nil {
return err
}
resp, err := requestRetryLoop(req, attempts)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return buildError(resp)
}
return nil
}
// Abort deletes an unifinished multipart upload and any previously
@ -389,21 +481,22 @@ func (m *Multi) Complete(parts []Part) error {
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
params := map[string][]string{
"uploadId": {m.UploadId},
req, err := http.NewRequest("DELETE", m.Bucket.Region.ResolveS3BucketEndpoint(m.Bucket.Name), nil)
if err != nil {
return nil
}
for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "DELETE",
bucket: m.Bucket.Name,
path: m.Key,
params: params,
}
err := m.Bucket.S3.query(req, nil)
if shouldRetry(err) && attempt.HasNext() {
continue
}
req.URL.Path += m.Key
query := req.URL.Query()
query.Add("uploadId", m.UploadId)
req.URL.RawQuery = query.Encode()
addAmazonDateHeader(req.Header)
if err := m.Bucket.S3.Sign(req, m.Bucket.Auth); err != nil {
return err
}
panic("unreachable")
_, err = requestRetryLoop(req, attempts)
return err
}

View file

@ -2,17 +2,20 @@ package s3_test
import (
"encoding/xml"
"github.com/mitchellh/goamz/s3"
. "github.com/motain/gocheck"
"io"
"io/ioutil"
"strings"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/s3"
)
func (s *S) TestInitMulti(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -29,12 +32,13 @@ func (s *S) TestInitMulti(c *C) {
func (s *S) TestMultiNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
s3.RetryAttempts(false)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, nil, InitMultiResultDump)
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.Multi("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -56,7 +60,8 @@ func (s *S) TestMultiNoPreviousUpload(c *C) {
func (s *S) TestMultiReturnOld(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.Multi("multi1", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -76,7 +81,8 @@ func (s *S) TestListParts(c *C) {
testServer.Response(404, nil, NoSuchUploadErrorDump) // :-(
testServer.Response(200, nil, ListPartsResultDump2)
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -116,7 +122,8 @@ func (s *S) TestPutPart(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, headers, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -147,7 +154,7 @@ func readAll(r io.Reader) string {
func (s *S) TestPutAllNoPreviousUpload(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
etag2 := map[string]string{"ETag": `"etag2"`}
@ -158,17 +165,18 @@ func (s *S) TestPutAllNoPreviousUpload(c *C) {
testServer.Response(200, etag2, "")
testServer.Response(200, etag3, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
c.Assert(parts, HasLen, 3)
c.Assert(parts[0].ETag, Equals, `"etag1"`)
c.Assert(parts[1].ETag, Equals, `"etag2"`)
c.Assert(parts[2].ETag, Equals, `"etag3"`)
c.Assert(err, IsNil)
c.Assert(parts, HasLen, 3)
c.Check(parts[0].ETag, Equals, `"etag1"`)
c.Check(parts[1].ETag, Equals, `"etag2"`)
c.Check(parts[2].ETag, Equals, `"etag3"`)
// Init
testServer.WaitRequest()
@ -205,14 +213,15 @@ func (s *S) TestPutAllNoPreviousUpload(c *C) {
func (s *S) TestPutAllZeroSizeFile(c *C) {
// Don't retry the NoSuchUpload error.
s.DisableRetries()
s3.RetryAttempts(false)
etag1 := map[string]string{"ETag": `"etag1"`}
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(404, nil, NoSuchUploadErrorDump)
testServer.Response(200, etag1, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -247,7 +256,8 @@ func (s *S) TestPutAllResume(c *C) {
testServer.Response(200, nil, ListPartsResultDump2)
testServer.Response(200, etag2, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -293,7 +303,8 @@ func (s *S) TestMultiComplete(c *C) {
testServer.Response(200, nil, InternalErrorDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -301,8 +312,9 @@ func (s *S) TestMultiComplete(c *C) {
err = multi.Complete([]s3.Part{{2, `"ETag2"`, 32}, {1, `"ETag1"`, 64}})
c.Assert(err, IsNil)
testServer.WaitRequest()
req := testServer.WaitRequest()
// Grab the 2nd request.
req := testServer.WaitRequests(2)[1]
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/sample/multi")
c.Assert(req.Form.Get("uploadId"), Matches, "JNbR_[A-Za-z0-9.]+QQ--")
@ -315,8 +327,7 @@ func (s *S) TestMultiComplete(c *C) {
}
}
dec := xml.NewDecoder(req.Body)
err = dec.Decode(&payload)
err = xml.NewDecoder(req.Body).Decode(&payload)
c.Assert(err, IsNil)
c.Assert(payload.XMLName.Local, Equals, "CompleteMultipartUpload")
@ -331,7 +342,8 @@ func (s *S) TestMultiAbort(c *C) {
testServer.Response(200, nil, InitMultiResultDump)
testServer.Response(200, nil, "")
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multi, err := b.InitMulti("multi", "text/plain", s3.Private)
c.Assert(err, IsNil)
@ -349,7 +361,8 @@ func (s *S) TestMultiAbort(c *C) {
func (s *S) TestListMulti(c *C) {
testServer.Response(200, nil, ListMultiResultDump)
b := s.s3.Bucket("sample")
b, err := s.s3.Bucket("sample")
c.Assert(err, IsNil)
multis, prefixes, err := b.ListMulti("", "/")
c.Assert(err, IsNil)

View file

@ -196,46 +196,3 @@ var InternalErrorDump = `
<HostId>kjhwqk</HostId>
</Error>
`
var GetKeyHeaderDump = map[string]string{
"x-amz-id-2": "ef8yU9AS1ed4OpIszj7UDNEHGran",
"x-amz-request-id": "318BC8BC143432E5",
"x-amz-version-id": "3HL4kqtJlcpXroDTDmjVBH40Nrjfkd",
"Date": "Wed, 28 Oct 2009 22:32:00 GMT",
"Last-Modified": "Sun, 1 Jan 2006 12:00:00 GMT",
"ETag": "fba9dede5f27731c9771645a39863328",
"Content-Length": "434234",
"Content-Type": "text/plain",
}
var GetListBucketsDump = `
<?xml version="1.0" encoding="UTF-8"?>
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>bb5c0f63b0b25f2d0</ID>
<DisplayName>joe</DisplayName>
</Owner>
<Buckets>
<Bucket>
<Name>bucket1</Name>
<CreationDate>2012-01-01T02:03:04.000Z</CreationDate>
</Bucket>
<Bucket>
<Name>bucket2</Name>
<CreationDate>2014-01-11T02:03:04.000Z</CreationDate>
</Bucket>
</Buckets>
</ListAllMyBucketsResult>
`
var MultiDelDump = `
<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Deleted>
<Key>a.go</Key>
</Deleted>
<Deleted>
<Key>b.go</Key>
</Deleted>
</DeleteResult>
`

566
Godeps/_workspace/src/gopkg.in/amz.v3/s3/s3.go generated vendored Normal file
View file

@ -0,0 +1,566 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
package s3
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"gopkg.in/amz.v3/aws"
)
// debug, when true, logs full request URLs, responses and error
// payloads via the standard log package.
const debug = false
// The S3 type encapsulates operations with an S3 region.
type S3 struct {
	aws.Auth
	aws.Region
	// Sign signs outgoing requests before they are sent
	// (New installs a V4 signer via aws.SignV4Factory).
	Sign    aws.Signer
	private byte // Reserve the right of using private data.
}

// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
	*S3
	Name string // bucket name, validated by (*S3).Bucket
}

// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
	ID          string
	DisplayName string
}
var (
	// attempts is the retry strategy applied to every request in this
	// package; RetryAttempts switches it between defaultAttempts and
	// a zero-value (no-retry) strategy.
	attempts = defaultAttempts

	// defaultAttempts retries for up to 5 seconds, at least 5 times,
	// with 200ms between attempts.
	defaultAttempts = aws.AttemptStrategy{
		Min:   5,
		Total: 5 * time.Second,
		Delay: 200 * time.Millisecond,
	}
)
// RetryAttempts sets whether failing S3 requests may be retried to cope
// with eventual consistency or temporary failures. It should not be
// called while operations are in progress.
func RetryAttempts(retry bool) {
	if !retry {
		// A zero-value strategy performs no attempts at all.
		attempts = aws.AttemptStrategy{}
		return
	}
	attempts = defaultAttempts
}
// New creates a new S3 client for the given credentials and region,
// using AWS signature version 4 for the "s3" service.
func New(auth aws.Auth, region aws.Region) *S3 {
	signer := aws.SignV4Factory(region.Name, "s3")
	return &S3{Auth: auth, Region: region, Sign: signer}
}
// Bucket returns a Bucket with the given name. An error is returned
// when the name contains characters that cannot appear in a bucket
// endpoint ('/', ':' or '@').
func (s3 *S3) Bucket(name string) (*Bucket, error) {
	if strings.ContainsAny(name, "/:@") {
		return nil, fmt.Errorf("bad S3 bucket: %q", name)
	}
	// Lowercase the name when the region configuration calls for it.
	if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
		name = strings.ToLower(name)
	}
	return &Bucket{s3, name}, nil
}
// createBucketConfiguration is the XML payload template for PUT Bucket
// requests; %s receives the region name.
var createBucketConfiguration = `<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <LocationConstraint>%s</LocationConstraint>
</CreateBucketConfiguration>`

// locationConstraint returns a *strings.Reader specifying a
// LocationConstraint if required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() *strings.Reader {
	// Regions that do not require a constraint get an empty body.
	constraint := ""
	if s3.Region.S3LocationConstraint {
		constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
	}
	return strings.NewReader(constraint)
}
// ACL is a canned access-control policy name; it is sent to S3 in the
// x-amz-acl header (see PutBucket and PutReaderWithHeader).
type ACL string

const (
	Private           = ACL("private")
	PublicRead        = ACL("public-read")
	PublicReadWrite   = ACL("public-read-write")
	AuthenticatedRead = ACL("authenticated-read")
	BucketOwnerRead   = ACL("bucket-owner-read")
	BucketOwnerFull   = ACL("bucket-owner-full-control")
)
// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL) error {
	return b.PutReader(path, bytes.NewReader(data), int64(len(data)), contType, perm)
}
// PutBucket creates a new bucket with the given canned ACL.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
	body := b.locationConstraint()
	req, err := http.NewRequest("PUT", b.ResolveS3BucketEndpoint(b.Name), body)
	if err != nil {
		return err
	}
	req.Close = true

	addAmazonDateHeader(req.Header)
	req.Header.Add("x-amz-acl", string(perm))

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	// Signing may read the request body.
	if _, err := body.Seek(0, 0); err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Previously the response was discarded entirely: its body leaked
	// and HTTP-level failures were reported as success.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes resp.Body
	}
	resp.Body.Close()
	return nil
}
// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) {
	req, err := http.NewRequest("DELETE", b.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return err
	}
	req.Close = true

	addAmazonDateHeader(req.Header)
	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return err
	}
	// Report S3-level failures (e.g. a non-empty bucket) instead of
	// silently returning success as before. A successful delete
	// answers 204 No Content.
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes resp.Body
	}
	resp.Body.Close()
	return nil
}
// Get retrieves an object from an S3 bucket, returning its full
// contents.
//
// See http://goo.gl/isCO7 for details.
func (b *Bucket) Get(path string) (data []byte, err error) {
	rc, err := b.GetReader(path)
	if err != nil {
		return nil, err
	}
	data, err = ioutil.ReadAll(rc)
	rc.Close()
	return data, err
}
// GetReader retrieves an object from an S3 bucket. It is the caller's
// responsibility to call Close on rc when finished reading.
func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
	endpoint := b.Region.ResolveS3BucketEndpoint(b.Name)
	req, err := http.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}
	req.Close = true
	req.URL.Path += path

	addAmazonDateHeader(req.Header)
	if err = b.S3.Sign(req, b.Auth); err != nil {
		return nil, err
	}

	res, err := requestRetryLoop(req, attempts)
	if err != nil {
		return nil, err
	}
	switch res.StatusCode {
	case http.StatusOK, http.StatusNoContent:
		return res.Body, nil
	}
	return nil, buildError(res) // closes res.Body
}
// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF. Passing in an io.ReadSeeker for r will optimize
// the memory usage.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL) error {
	noExtraHeaders := http.Header{}
	return b.PutReaderWithHeader(path, r, length, contType, perm, noExtraHeaders)
}
// PutReaderWithHeader inserts an object into the S3 bucket by
// consuming data from r until EOF. It also adds the headers provided
// to the request. Passing in an io.ReadSeeker for r will optimize the
// memory usage.
func (b *Bucket) PutReaderWithHeader(path string, r io.Reader, length int64, contType string, perm ACL, hdrs http.Header) error {
	// Convert the reader to a ReadSeeker so we can seek after
	// signing. A non-seekable reader is buffered fully in memory.
	seeker, ok := r.(io.ReadSeeker)
	if !ok {
		content, err := ioutil.ReadAll(r)
		if err != nil {
			return err
		}
		seeker = bytes.NewReader(content)
	}

	req, err := http.NewRequest("PUT", b.Region.ResolveS3BucketEndpoint(b.Name), seeker)
	if err != nil {
		return err
	}
	// Install the caller's headers wholesale, then layer the standard
	// ones on top. NOTE(review): this aliases hdrs, so the Adds below
	// are visible in the caller's map — confirm that is intended.
	req.Header = hdrs
	req.Close = true
	req.URL.Path += path
	req.ContentLength = length

	req.Header.Add("Content-Type", contType)
	req.Header.Add("x-amz-acl", string(perm))
	addAmazonDateHeader(req.Header)

	// Determine the current offset so the body can be rewound to it
	// after signing (seek whence 1 = relative to current position).
	const seekFromPos = 1
	prevPos, err := seeker.Seek(0, seekFromPos)
	if err != nil {
		return err
	}

	if err := b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	// Signing may read the request body.
	if _, err := seeker.Seek(prevPos, 0); err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return buildError(resp) // closes body
	}
	resp.Body.Close()
	return nil
}
// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
	endpoint := b.ResolveS3BucketEndpoint(b.Name)
	req, err := http.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return err
	}
	req.Close = true
	req.URL.Path += path

	addAmazonDateHeader(req.Header)
	if err = b.S3.Sign(req, b.Auth); err != nil {
		return err
	}
	// NOTE(review): the response status is never inspected here, so an
	// HTTP-level failure is reported as success — confirm intended.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}
// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
	Name      string
	Prefix    string
	Delimiter string
	Marker    string
	// NextMarker continues a truncated listing when passed as the
	// marker of the next List call. TODO confirm: presumably only
	// populated when IsTruncated is true.
	NextMarker string
	MaxKeys    int

	// IsTruncated is true if the results have been truncated because
	// there are more keys and prefixes than can fit in MaxKeys.
	// N.B. this is the opposite sense to that documented (incorrectly) in
	// http://goo.gl/YjQTc
	IsTruncated    bool
	Contents       []Key
	CommonPrefixes []string `xml:">Prefix"`
}
// The Key type represents an item stored in an S3 bucket.
type Key struct {
	Key string
	// LastModified is the timestamp string as reported by S3
	// (left unparsed).
	LastModified string
	// Size is the object size in bytes.
	Size int64
	// ETag gives the hex-encoded MD5 sum of the contents,
	// surrounded with double-quotes.
	ETag         string
	StorageClass string
	Owner        Owner
}
// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
// index.html
// index2.html
// photos/2006/January/sample.jpg
// photos/2006/February/sample2.jpg
// photos/2006/February/sample3.jpg
// photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Contents: []Key{
// {Key: "index.html", "index2.html"},
// },
// CommonPrefixes: []string{
// "photos/",
// },
// }
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
// &ListResp{
// Name: "sample-bucket",
// MaxKeys: 1000,
// Delimiter: "/",
// Prefix: "photos/2006/",
// CommonPrefixes: []string{
// "photos/2006/February/",
// "photos/2006/January/",
// },
// }
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (*ListResp, error) {
	req, err := http.NewRequest("GET", b.ResolveS3BucketEndpoint(b.Name), nil)
	if err != nil {
		return nil, err
	}
	req.Close = true

	query := req.URL.Query()
	query.Add("prefix", prefix)
	query.Add("delimiter", delim)
	query.Add("marker", marker)
	if max != 0 {
		query.Add("max-keys", strconv.FormatInt(int64(max), 10))
	}
	req.URL.RawQuery = query.Encode()

	addAmazonDateHeader(req.Header)
	if err := b.S3.Sign(req, b.Auth); err != nil {
		return nil, err
	}

	resp, err := requestRetryLoop(req, attempts)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return nil, buildError(resp) // closes body
	}

	var result ListResp
	err = xml.NewDecoder(resp.Body).Decode(&result)
	resp.Body.Close()
	if err != nil {
		// Previously a decode failure was discarded and a partially
		// filled ListResp returned as success.
		return nil, err
	}
	return &result, nil
}
// URL returns a non-signed URL that allows retrieving the object at
// path. It only works if the object is publicly readable (see
// SignedURL).
func (b *Bucket) URL(path string) string {
	endpoint := b.ResolveS3BucketEndpoint(b.Name)
	return endpoint + path
}
// SignedURL returns a URL which can be used to fetch objects without
// signing for the given duration.
func (b *Bucket) SignedURL(path string, expires time.Duration) (string, error) {
	req, err := http.NewRequest("GET", b.URL(path), nil)
	if err != nil {
		return "", err
	}
	now := time.Now().Format(aws.ISO8601BasicFormat)
	req.Header.Add("date", now)

	if err := aws.SignV4URL(req, b.Auth, b.Region.Name, "s3", expires); err != nil {
		return "", err
	}
	return req.URL.String(), nil
}
// request describes a pending S3 operation in the legacy
// request-struct form.
type request struct {
	method   string
	bucket   string
	path     string
	signpath string
	params   url.Values
	headers  http.Header
	baseurl  string
	payload  io.Reader
	prepared bool
}

// url assembles the request's base URL, path and query parameters
// into a single *url.URL.
func (req *request) url() (*url.URL, error) {
	parsed, err := url.Parse(req.baseurl)
	if err != nil {
		return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
	}
	parsed.Path = req.path
	parsed.RawQuery = req.params.Encode()
	return parsed, nil
}
// Error represents an error in an operation with S3. It is built from
// the XML error payload by buildError.
type Error struct {
	StatusCode int    // HTTP status code (200, 403, ...)
	Code       string // EC2 error code ("UnsupportedOperation", ...)
	Message    string // The human-oriented error message
	BucketName string
	RequestId  string
	HostId     string
}

// Error implements the error interface, returning the human-oriented
// message.
func (e *Error) Error() string {
	return e.Message
}
// buildError decodes the S3 XML error payload from r's body into an
// *Error and closes the body. The HTTP status code is recorded on the
// error, and r.Status is substituted when the payload carries no
// message.
func buildError(r *http.Response) error {
	if debug {
		log.Printf("got error (status code %v)", r.StatusCode)
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Printf("\tread error: %v", err)
		} else {
			log.Printf("\tdata:\n%s\n\n", data)
		}
		// Restore the body so the decode below can still read it.
		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
	}

	err := Error{}
	// TODO return error if Unmarshal fails?
	xml.NewDecoder(r.Body).Decode(&err)
	r.Body.Close()
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	if debug {
		log.Printf("err: %#v\n", err)
	}
	return &err
}
// shouldRetry reports whether err is a transient failure worth
// retrying: an (unexpected) EOF, a DNS error, a network read/write
// error, or one of a small set of retryable S3 error codes.
func shouldRetry(err error) bool {
	if err == nil {
		return false
	}
	if err == io.ErrUnexpectedEOF || err == io.EOF {
		return true
	}
	switch e := err.(type) {
	case *net.DNSError:
		return true
	case *net.OpError:
		return e.Op == "read" || e.Op == "write"
	case *Error:
		return e.Code == "InternalError" ||
			e.Code == "NoSuchUpload" ||
			e.Code == "NoSuchBucket"
	}
	return false
}

// hasCode reports whether err is an S3 *Error carrying the given
// error code.
func hasCode(err error, code string) bool {
	s3err, ok := err.(*Error)
	return ok && s3err.Code == code
}
// requestRetryLoop attempts to send the request until the given
// strategy says to stop.
//
// NOTE(review): a request with a consumed, non-rewindable body cannot
// safely be re-sent by http.DefaultClient.Do — confirm that retried
// requests are body-less or rewindable.
func requestRetryLoop(req *http.Request, retryStrat aws.AttemptStrategy) (*http.Response, error) {
	// Iterate the strategy that was passed in; previously the
	// package-level attempts variable was used here, silently
	// ignoring the retryStrat parameter.
	for attempt := retryStrat.Start(); attempt.Next(); {
		if debug {
			log.Printf("Full URL (in loop): %v", req.URL)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			if shouldRetry(err) && attempt.HasNext() {
				continue
			}
			return nil, fmt.Errorf("making request: %v", err)
		}
		if debug {
			log.Printf("Full response (in loop): %v", resp)
		}
		return resp, nil
	}
	return nil, fmt.Errorf("could not complete the request within the specified retry attempts")
}
// addAmazonDateHeader sets the x-amz-date header to the current UTC
// time in Amazon's ISO 8601 basic format.
func addAmazonDateHeader(header http.Header) {
	now := time.Now().UTC()
	header.Set("x-amz-date", now.Format(aws.ISO8601BasicFormat))
}

View file

@ -5,13 +5,13 @@ import (
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
func Test(t *testing.T) {
@ -28,12 +28,18 @@ var testServer = testutil.NewHTTPServer()
func (s *S) SetUpSuite(c *C) {
testServer.Start()
auth := aws.Auth{"abc", "123", ""}
s.s3 = s3.New(auth, aws.Region{Name: "faux-region-1", S3Endpoint: testServer.URL})
s.s3 = s3.New(
aws.Auth{"abc", "123"},
aws.Region{
Name: "faux-region-1",
S3Endpoint: testServer.URL,
},
)
}
func (s *S) TearDownSuite(c *C) {
s3.SetAttemptStrategy(nil)
testServer.Stop()
}
func (s *S) SetUpTest(c *C) {
@ -48,17 +54,14 @@ func (s *S) TearDownTest(c *C) {
testServer.Flush()
}
func (s *S) DisableRetries() {
s3.SetAttemptStrategy(&aws.AttemptStrategy{})
}
// PutBucket docs: http://goo.gl/kBTCu
func (s *S) TestPutBucket(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutBucket(s3.Private)
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.PutBucket(s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
@ -67,73 +70,11 @@ func (s *S) TestPutBucket(c *C) {
c.Assert(req.Header["Date"], Not(Equals), "")
}
// DeleteBucket docs: http://goo.gl/GoBrY
func (s *S) TestDelBucket(c *C) {
testServer.Response(204, nil, "")
b := s.s3.Bucket("bucket")
err := b.DelBucket()
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// ListBuckets: http://goo.gl/NqlyMN
func (s *S) TestListBuckets(c *C) {
testServer.Response(200, nil, GetListBucketsDump)
buckets, err := s.s3.ListBuckets()
c.Assert(err, IsNil)
c.Assert(len(buckets.Buckets), Equals, 2)
c.Assert(buckets.Buckets[0].Name, Equals, "bucket1")
c.Assert(buckets.Buckets[1].Name, Equals, "bucket2")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/")
}
// GetObject docs: http://goo.gl/isCO7
func (s *S) TestGet(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
data, err := b.Get("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
}
func (s *S) TestHead(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
resp, err := b.Head("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Assert(len(body), Equals, 0)
}
func (s *S) TestURL(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
url := b.URL("name")
r, err := http.Get(url)
c.Assert(err, IsNil)
@ -147,10 +88,45 @@ func (s *S) TestURL(c *C) {
c.Assert(req.URL.Path, Equals, "/bucket/name")
}
// DeleteBucket docs: http://goo.gl/GoBrY
func (s *S) TestDelBucket(c *C) {
testServer.Response(204, nil, "")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "DELETE")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.Header["Date"], Not(Equals), "")
}
// GetObject docs: http://goo.gl/isCO7
func (s *S) TestGet(c *C) {
testServer.Response(200, nil, "content")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
data, err := b.Get("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "GET")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "content")
}
func (s *S) TestGetReader(c *C) {
testServer.Response(200, nil, "content")
b := s.s3.Bucket("bucket")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
rc, err := b.GetReader("name")
c.Assert(err, IsNil)
data, err := ioutil.ReadAll(rc)
@ -169,7 +145,8 @@ func (s *S) TestGetNotFound(c *C) {
testServer.Response(404, nil, GetObjectErrorDump)
}
b := s.s3.Bucket("non-existent-bucket")
b, err := s.s3.Bucket("non-existent-bucket")
c.Assert(err, IsNil)
data, err := b.Get("non-existent")
req := testServer.WaitRequest()
@ -194,30 +171,9 @@ func (s *S) TestGetNotFound(c *C) {
func (s *S) TestPutObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Put("name", []byte("content"), "content-type", s3.Private)
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], DeepEquals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutObjectHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.PutHeader(
"name",
[]byte("content"),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
err = b.Put("name", []byte("content"), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
@ -233,9 +189,10 @@ func (s *S) TestPutObjectHeader(c *C) {
func (s *S) TestPutReader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
buf := bytes.NewReader([]byte("content"))
err = b.PutReader("name", buf, int64(buf.Len()), "content-type", s3.Private)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
@ -248,18 +205,15 @@ func (s *S) TestPutReader(c *C) {
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPutReaderHeader(c *C) {
func (s *S) TestPutReaderWithHeader(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
buf := bytes.NewBufferString("content")
err := b.PutReaderHeader(
"name",
buf,
int64(buf.Len()),
map[string][]string{"Content-Type": {"content-type"}},
s3.Private,
)
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
buf := bytes.NewReader([]byte("content"))
err = b.PutReaderWithHeader("name", buf, int64(buf.Len()), "content-type", s3.Private, http.Header{
"Cache-Control": []string{"max-age=5"},
})
c.Assert(err, IsNil)
req := testServer.WaitRequest()
@ -268,44 +222,8 @@ func (s *S) TestPutReaderHeader(c *C) {
c.Assert(req.Header["Date"], Not(DeepEquals), []string{""})
c.Assert(req.Header["Content-Type"], DeepEquals, []string{"content-type"})
c.Assert(req.Header["Content-Length"], DeepEquals, []string{"7"})
//c.Assert(req.Header["Content-MD5"], Equals, "...")
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestCopy(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"old/file",
"new/file",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.URL.Path, Equals, "/bucket/new/file")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/old/file"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
}
func (s *S) TestPlusInURL(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Copy(
"dir/old+f?le",
"dir/new+f?le",
s3.Private,
)
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "PUT")
c.Assert(req.RequestURI, Equals, "/bucket/dir/new%2Bf%3Fle")
c.Assert(req.Header["X-Amz-Copy-Source"], DeepEquals, []string{"/bucket/dir/old%2Bf%3Fle"})
c.Assert(req.Header["X-Amz-Acl"], DeepEquals, []string{"private"})
c.Assert(req.Header["Cache-Control"], DeepEquals, []string{"max-age=5"})
}
// DelObject docs: http://goo.gl/APeTt
@ -313,8 +231,9 @@ func (s *S) TestPlusInURL(c *C) {
func (s *S) TestDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.Del("name")
b, err := s.s3.Bucket("bucket")
c.Assert(err, IsNil)
err = b.Del("name")
c.Assert(err, IsNil)
req := testServer.WaitRequest()
@ -323,32 +242,13 @@ func (s *S) TestDelObject(c *C) {
c.Assert(req.Header["Date"], Not(Equals), "")
}
// Delete Multiple Objects docs: http://goo.gl/WvA5sj
func (s *S) TestMultiDelObject(c *C) {
testServer.Response(200, nil, "")
b := s.s3.Bucket("bucket")
err := b.MultiDel([]string{"a", "b"})
c.Assert(err, IsNil)
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "POST")
c.Assert(req.URL.Path, Equals, "/bucket/")
c.Assert(req.RequestURI, Equals, "/bucket/?delete=")
c.Assert(req.Header["Content-Md5"], DeepEquals, []string{"nos/vZNvjGs17xIyjEFlwQ=="})
data, err := ioutil.ReadAll(req.Body)
req.Body.Close()
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "<Delete><Quiet>false</Quiet><Object><Key>a</Key></Object><Object><Key>b</Key></Object></Delete>")
}
// Bucket List Objects docs: http://goo.gl/YjQTc
func (s *S) TestList(c *C) {
testServer.Response(200, nil, GetListResultDump1)
b := s.s3.Bucket("quotes")
b, err := s.s3.Bucket("quotes")
c.Assert(err, IsNil)
data, err := b.List("N", "", "", 0)
c.Assert(err, IsNil)
@ -387,7 +287,8 @@ func (s *S) TestList(c *C) {
func (s *S) TestListWithDelimiter(c *C) {
testServer.Response(200, nil, GetListResultDump2)
b := s.s3.Bucket("quotes")
b, err := s.s3.Bucket("quotes")
c.Assert(err, IsNil)
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
c.Assert(err, IsNil)
@ -410,26 +311,11 @@ func (s *S) TestListWithDelimiter(c *C) {
c.Assert(data.CommonPrefixes, DeepEquals, []string{"photos/2006/feb/", "photos/2006/jan/"})
}
func (s *S) TestGetKey(c *C) {
testServer.Response(200, GetKeyHeaderDump, "")
b := s.s3.Bucket("bucket")
key, err := b.GetKey("name")
req := testServer.WaitRequest()
c.Assert(req.Method, Equals, "HEAD")
c.Assert(req.URL.Path, Equals, "/bucket/name")
c.Assert(req.Header["Date"], Not(Equals), "")
c.Assert(err, IsNil)
c.Assert(key.Key, Equals, "name")
c.Assert(key.LastModified, Equals, GetKeyHeaderDump["Last-Modified"])
c.Assert(key.Size, Equals, int64(434234))
c.Assert(key.ETag, Equals, GetKeyHeaderDump["ETag"])
}
func (s *S) TestUnescapedColon(c *C) {
b := s.s3.Bucket("bucket")
u := b.URL("foo:bar")
c.Assert(u, Equals, "http://localhost:4444/bucket/foo:bar")
func (s *S) TestRetryAttempts(c *C) {
s3.SetAttemptStrategy(nil)
orig := s3.AttemptStrategy()
s3.RetryAttempts(false)
c.Assert(s3.AttemptStrategy(), Equals, aws.AttemptStrategy{})
s3.RetryAttempts(true)
c.Assert(s3.AttemptStrategy(), Equals, orig)
}

View file

@ -5,16 +5,17 @@ import (
"crypto/md5"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
"net"
"net/http"
"sort"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/testutil"
)
// AmazonServer represents an Amazon S3 server.
@ -32,7 +33,6 @@ func (s *AmazonServer) SetUp(c *C) {
var _ = Suite(&AmazonClientSuite{Region: aws.USEast})
var _ = Suite(&AmazonClientSuite{Region: aws.EUWest})
var _ = Suite(&AmazonClientSuite{Region: aws.EUCentral})
var _ = Suite(&AmazonDomainClientSuite{Region: aws.USEast})
// AmazonClientSuite tests the client against a live S3 server.
@ -71,7 +71,7 @@ func (s *AmazonDomainClientSuite) SetUpSuite(c *C) {
}
s.srv.SetUp(c)
region := s.Region
region.S3BucketEndpoint = "https://${bucket}.s3.amazonaws.com"
region.S3BucketEndpoint += "https://s3.amazonaws.com/${bucket}/"
s.s3 = s3.New(s.srv.auth, region)
s.ClientTests.Cleanup()
}
@ -84,8 +84,7 @@ func (s *AmazonDomainClientSuite) TearDownTest(c *C) {
// It is not used as a test suite in itself, but embedded within
// another type.
type ClientTests struct {
s3 *s3.S3
authIsBroken bool
s3 *s3.S3
}
func (s *ClientTests) Cleanup() {
@ -99,7 +98,18 @@ func testBucket(s *s3.S3) *s3.Bucket {
if len(key) >= 8 {
key = s.Auth.AccessKey[:8]
}
return s.Bucket(fmt.Sprintf("goamz-%s-%s", s.Region.Name, key))
b, err := s.Bucket(strings.ToLower(fmt.Sprintf(
"goamz-%s-%s-%s",
s.Region.Name,
key,
// Add in the time element to help isolate tests from one
// another.
time.Now().Format("20060102T150405.999999999"),
)))
if err != nil {
panic(err)
}
return b
}
var attempts = aws.AttemptStrategy{
@ -143,26 +153,40 @@ func killBucket(b *s3.Bucket) {
panic(message)
}
func get(url string) ([]byte, error) {
for attempt := attempts.Start(); attempt.Next(); {
resp, err := http.Get(url)
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
data, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
if attempt.HasNext() {
continue
}
return nil, err
}
return data, err
}
panic("unreachable")
func (s *ClientTests) TestSignedUrl(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
c.Assert(err, IsNil)
s.testSignedUrl(c, b, "name")
// Test that various special characters get escaped properly.
s.testSignedUrl(c, b, "&@$=:,!-_.*'( )")
}
func (s *ClientTests) testSignedUrl(c *C, b *s3.Bucket, name string) {
err := b.Put(name, []byte("test for signed URLs."), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del(name)
req, err := http.NewRequest("GET", b.URL(name), nil)
c.Assert(err, IsNil)
resp, err := http.DefaultClient.Do(req)
c.Assert(err, IsNil)
err = s3.BuildError(resp)
c.Check(err, NotNil)
c.Check(err.(*s3.Error).Code, Equals, "AccessDenied")
url, err := b.SignedURL(name, 24*time.Hour)
c.Assert(err, IsNil)
req, err = http.NewRequest("GET", url, nil)
c.Assert(err, IsNil)
resp, err = http.DefaultClient.Do(req)
c.Assert(err, IsNil)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, IsNil)
c.Check(string(body), Equals, "test for signed URLs.")
}
func (s *ClientTests) TestBasicFunctionality(c *C) {
@ -178,11 +202,7 @@ func (s *ClientTests) TestBasicFunctionality(c *C) {
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
data, err = get(b.URL("name"))
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
buf := bytes.NewBufferString("hey!")
buf := bytes.NewReader([]byte("hey!"))
err = b.PutReader("name2", buf, int64(buf.Len()), "text/plain", s3.Private)
c.Assert(err, IsNil)
defer b.Del("name2")
@ -194,25 +214,10 @@ func (s *ClientTests) TestBasicFunctionality(c *C) {
c.Check(string(data), Equals, "hey!")
rc.Close()
data, err = get(b.SignedURL("name2", time.Now().Add(time.Hour)))
data, err = b.Get("name2")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "hey!")
if !s.authIsBroken {
data, err = get(b.SignedURL("name2", time.Now().Add(-time.Hour)))
c.Assert(err, IsNil)
c.Assert(string(data), Matches, "(?s).*AccessDenied.*")
}
err = b.DelBucket()
c.Assert(err, NotNil)
s3err, ok := err.(*s3.Error)
c.Assert(ok, Equals, true)
c.Assert(s3err.Code, Equals, "BucketNotEmpty")
c.Assert(s3err.BucketName, Equals, b.Name)
c.Assert(s3err.Message, Equals, "The bucket you tried to delete is not empty")
err = b.Del("name")
c.Assert(err, IsNil)
err = b.Del("name2")
@ -222,33 +227,9 @@ func (s *ClientTests) TestBasicFunctionality(c *C) {
c.Assert(err, IsNil)
}
func (s *ClientTests) TestCopy(c *C) {
b := testBucket(s.s3)
err := b.PutBucket(s3.PublicRead)
err = b.Put("name+1", []byte("yo!"), "text/plain", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+1")
err = b.Copy("name+1", "name+2", s3.PublicRead)
c.Assert(err, IsNil)
defer b.Del("name+2")
data, err := b.Get("name+2")
c.Assert(err, IsNil)
c.Assert(string(data), Equals, "yo!")
err = b.Del("name+1")
c.Assert(err, IsNil)
err = b.Del("name+2")
c.Assert(err, IsNil)
err = b.DelBucket()
c.Assert(err, IsNil)
}
func (s *ClientTests) TestGetNotFound(c *C) {
b := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
b, err := s.s3.Bucket("goamz-" + s.s3.Auth.AccessKey)
c.Assert(err, IsNil)
data, err := b.Get("non-existent")
s3err, _ := err.(*s3.Error)
@ -261,28 +242,40 @@ func (s *ClientTests) TestGetNotFound(c *C) {
// Communicate with all endpoints to see if they are alive.
func (s *ClientTests) TestRegions(c *C) {
errs := make(chan error, len(aws.Regions))
type result struct {
aws.Region
error
}
results := make(chan result, len(aws.Regions))
for _, region := range aws.Regions {
go func(r aws.Region) {
s := s3.New(s.s3.Auth, r)
b := s.Bucket("goamz-" + s.Auth.AccessKey)
_, err := b.Get("non-existent")
errs <- err
b, err := s.Bucket("goamz-" + s.Auth.AccessKey)
if !c.Check(err, IsNil) {
return
}
_, err = b.Get("non-existent")
if !c.Check(err, NotNil) {
return
}
results <- result{r, err}
}(region)
}
for _ = range aws.Regions {
err := <-errs
if err != nil {
s3_err, ok := err.(*s3.Error)
if ok {
c.Check(s3_err.Code, Matches, "NoSuchBucket")
} else if _, ok = err.(*net.DNSError); ok {
// Okay as well.
} else {
c.Errorf("Non-S3 error: %s", err)
result := <-results
if s3_err, ok := result.error.(*s3.Error); ok {
if result.Region == aws.CNNorth && s3_err.Code == "InvalidAccessKeyId" {
c.Log("You must utilize an account specifically for CNNorth.")
continue
}
c.Check(s3_err.Code, Matches, "NoSuchBucket")
} else if _, ok = result.error.(*net.DNSError); ok {
// Okay as well.
} else {
c.Errorf("Test should have errored but it seems to have succeeded")
c.Errorf("Non-S3 error: %s", result.error)
}
}
}
@ -391,6 +384,7 @@ func (s *ClientTests) TestDoublePutBucket(c *C) {
func (s *ClientTests) TestBucketList(c *C) {
b := testBucket(s.s3)
defer b.DelBucket()
err := b.PutBucket(s3.Private)
c.Assert(err, IsNil)

View file

@ -1,10 +1,11 @@
package s3_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/s3/s3test"
. "github.com/motain/gocheck"
. "gopkg.in/check.v1"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
)
type LocalServer struct {
@ -52,9 +53,6 @@ var (
func (s *LocalServerSuite) SetUpSuite(c *C) {
s.srv.SetUp(c)
s.clientTests.s3 = s3.New(s.srv.auth, s.srv.region)
// TODO Sadly the fake server ignores auth completely right now. :-(
s.clientTests.authIsBroken = true
s.clientTests.Cleanup()
}

View file

@ -6,7 +6,6 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
"github.com/mitchellh/goamz/s3"
"io"
"io/ioutil"
"log"
@ -19,6 +18,8 @@ import (
"strings"
"sync"
"time"
"gopkg.in/amz.v3/s3"
)
const debug = false
@ -226,13 +227,6 @@ var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
// resourceForURL returns a resource object for the given URL.
func (srv *Server) resourceForURL(u *url.URL) (r resource) {
if u.Path == "/" {
return serviceResource{
buckets: srv.buckets,
}
}
m := pathRegexp.FindStringSubmatch(u.Path)
if m == nil {
fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
@ -290,37 +284,6 @@ func (nullResource) delete(a *action) interface{} { return notAllowed() }
const timeFormat = "2006-01-02T15:04:05.000Z07:00"
type serviceResource struct {
buckets map[string]*bucket
}
func (serviceResource) put(a *action) interface{} { return notAllowed() }
func (serviceResource) post(a *action) interface{} { return notAllowed() }
func (serviceResource) delete(a *action) interface{} { return notAllowed() }
// GET on an s3 service lists the buckets.
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html
func (r serviceResource) get(a *action) interface{} {
type respBucket struct {
Name string
}
type response struct {
Buckets []respBucket `xml:">Bucket"`
}
resp := response{}
for _, bucketPtr := range r.buckets {
bkt := respBucket{
Name: bucketPtr.name,
}
resp.Buckets = append(resp.Buckets, bkt)
}
return &resp
}
type bucketResource struct {
name string
bucket *bucket // non-nil if the bucket already exists.

View file

@ -8,8 +8,8 @@ import (
"io/ioutil"
"strings"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"github.com/restic/restic/backend"
)
@ -50,7 +50,12 @@ func Open(regionname, bucketname string) (backend.Backend, error) {
client := s3.New(auth, aws.Regions[regionname])
return OpenS3Bucket(client.Bucket(bucketname), bucketname), nil
s3bucket, s3err := client.Bucket(bucketname)
if s3err != nil {
return nil, s3err
}
return OpenS3Bucket(s3bucket, bucketname), nil
}
// Location returns this backend's location (the bucket name).
@ -97,8 +102,8 @@ func (bb *s3Blob) Finalize(t backend.Type, name string) error {
path := s3path(t, name)
// Check key does not already exist
key, err := bb.b.bucket.GetKey(path)
if err == nil && key.Key == path {
_, err := bb.b.bucket.GetReader(path)
if err == nil {
return errors.New("key already exists!")
}
@ -153,8 +158,8 @@ func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint)
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
found := false
path := s3path(t, name)
key, err := be.bucket.GetKey(path)
if err == nil && key.Key == path {
_, err := be.bucket.GetReader(path)
if err == nil {
found = true
}

View file

@ -3,9 +3,9 @@ package backend_test
import (
"testing"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/s3"
"github.com/mitchellh/goamz/s3/s3test"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"gopkg.in/amz.v3/s3/s3test"
bes3 "github.com/restic/restic/backend/s3"
. "github.com/restic/restic/test"
@ -34,10 +34,11 @@ func setupS3Backend(t *testing.T) *bes3.S3Backend {
S3LocationConstraint: true, // s3test server requires a LocationConstraint
}
s.auth = aws.Auth{"abc", "123", ""}
s.auth = aws.Auth{"abc", "123"}
service := s3.New(s.auth, s.region)
bucket := service.Bucket("testbucket")
bucket, berr := service.Bucket("testbucket")
OK(t, berr)
err = bucket.PutBucket("private")
OK(t, err)