Merge pull request #531 from restic/update-minio-go

Update minio-go
Alexander Neumann 2016-06-08 22:00:16 +02:00
commit 814424fa6e
40 changed files with 459 additions and 277 deletions


@@ -27,7 +27,7 @@ type s3 struct {
 func Open(cfg Config) (backend.Backend, error) {
 	debug.Log("s3.Open", "open, config %#v", cfg)
-	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP)
+	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
 	if err != nil {
 		return nil, err
 	}
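The updated minio-go constructor interprets its final argument as `secure` (use HTTPS) rather than `insecure`, which is why restic now negates `cfg.UseHTTP`. A minimal caller-side sketch of the new semantics; the credentials and the `useHTTP` value are placeholders, not restic's real configuration:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	useHTTP := false // hypothetical stand-in for cfg.UseHTTP

	// The fourth argument now means "secure": true selects HTTPS.
	// With the old "insecure" meaning the flag would not be negated.
	client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", !useHTTP)
	if err != nil {
		log.Fatalln(err)
	}
	_ = client
}
```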

vendor/manifest

@@ -28,7 +28,7 @@
 	{
 		"importpath": "github.com/minio/minio-go",
 		"repository": "https://github.com/minio/minio-go",
-		"revision": "867b27701ad16db4a9f4dad40d28187ca8433ec9",
+		"revision": "a8babf4220d5dd7240d011bdb7be567b439460f9",
 		"branch": "master"
 	},
 	{


@@ -12,7 +12,8 @@ import (
 )
 func main() {
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
+	secure := true // Make HTTPS requests by default.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", secure)
 	if err != nil {
 		fmt.Println(err)
 		return
@@ -446,7 +447,7 @@ if err != nil {
 ### Presigned operations
 ---------------------------------------
 <a name="PresignedGetObject">
-#### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) error
+#### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
 Generate a presigned URL for GET.
 __Parameters__
@@ -471,7 +472,7 @@ if err != nil {
 ---------------------------------------
 <a name="PresignedPutObject">
-#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (string, error)
+#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
 Generate a presigned URL for PUT.
 <blockquote>
 NOTE: you can upload to S3 only with specified object name.
@@ -494,7 +495,7 @@ if err != nil {
 ---------------------------------------
 <a name="PresignedPostPolicy">
-#### PresignedPostPolicy(policy PostPolicy) (map[string]string, error)
+#### PresignedPostPolicy(policy PostPolicy) (*url.URL, map[string]string, error)
 PresignedPostPolicy we can provide policies specifying conditions restricting
 what you want to allow in a POST request, such as bucket name where objects can be
 uploaded, key name prefixes that you want to allow for the object being created and more.
@@ -517,7 +518,7 @@ policy.SetContentLengthRange(1024, 1024*1024)
 ```
 Get the POST form key/value object:
 ```go
-formData, err := s3Client.PresignedPostPolicy(policy)
+url, formData, err := s3Client.PresignedPostPolicy(policy)
 if err != nil {
 	fmt.Println(err)
 	return
@@ -531,5 +532,5 @@ for k, v := range m {
 	fmt.Printf("-F %s=%s ", k, v)
 }
 fmt.Printf("-F file=@/etc/bash.bashrc ")
-fmt.Printf("https://my-bucketname.s3.amazonaws.com\n")
+fmt.Printf("%s\n", url)
 ```
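Since the presigned helpers now return a `*url.URL` instead of a string, callers convert with `.String()` wherever a plain string is required. A short sketch against the updated signatures; the bucket name, object name, and credentials are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"net/url"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Ask for the object to be served as an attachment.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", "attachment; filename=\"my-file.txt\"")

	// Both calls now return *url.URL; call String() where a string is needed.
	getURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Hour, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	putURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Hour)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(getURL.String(), putURL.String())
}
```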


@@ -40,12 +40,13 @@ import (
 )
 func main() {
-	// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
+	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
 	// This boolean value is the last argument for New().
 	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
 	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
+	secure := true // Defaults to HTTPS requests.
+	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", secure)
 	if err != nil {
 		log.Fatalln(err)
 	}
@@ -85,9 +86,9 @@ func main() {
 * [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
 ### Presigned Operations.
-* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (string, error)](examples/s3/presignedgetobject.go)
-* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go)
-* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go)
+* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (*url.URL, error)](examples/s3/presignedgetobject.go)
+* [PresignedPutObject(bucketName, objectName, time.Duration) (*url.URL, error)](examples/s3/presignedputobject.go)
+* [PresignedPostPolicy(NewPostPolicy()) (*url.URL, map[string]string, error)](examples/s3/presignedpostpolicy.go)
 ### Bucket Policy Operations.
 * [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)


@@ -223,3 +223,13 @@ func ErrInvalidArgument(message string) error {
 		RequestID: "minio",
 	}
 }
+
+// ErrNoSuchBucketPolicy - No Such Bucket Policy response
+// The specified bucket does not have a bucket policy.
+func ErrNoSuchBucketPolicy(message string) error {
+	return ErrorResponse{
+		Code:      "NoSuchBucketPolicy",
+		Message:   message,
+		RequestID: "minio",
+	}
+}


@@ -59,6 +59,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 		select {
 		// When the done channel is closed exit our routine.
 		case <-doneCh:
+			// Close the http response body before returning.
+			// This ends the connection with the server.
+			httpReader.Close()
 			return
 		// Request message.
 		case req := <-reqCh:
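Closing the returned Object is what fires this doneCh path, so the underlying HTTP body is now released as soon as the caller finishes reading. A hedged usage sketch; the bucket and object names are placeholders:

```go
package main

import (
	"io"
	"io/ioutil"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := c.GetObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	// Closing the object signals the goroutine above via doneCh,
	// which now also closes the server connection promptly.
	defer obj.Close()

	if _, err := io.Copy(ioutil.Discard, obj); err != nil {
		log.Fatalln(err)
	}
}
```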


@@ -77,7 +77,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
 //
 func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
 	// Allocate new list objects channel.
-	objectStatCh := make(chan ObjectInfo)
+	objectStatCh := make(chan ObjectInfo, 1)
 	// Default listing is delimited at "/"
 	delimiter := "/"
 	if recursive {
@@ -254,7 +254,7 @@ func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive
 // listIncompleteUploads lists all incomplete uploads.
 func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
 	// Allocate channel for multipart uploads.
-	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
 	// Delimiter is set to "/" by default.
 	delimiter := "/"
 	if recursive {
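Caller-side usage is unchanged by the buffered channels; results are still consumed from the returned channel and listing is cancelled via doneCh. A minimal sketch, with placeholder bucket, prefix, and credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Closing doneCh tells the listing goroutine to stop early.
	doneCh := make(chan struct{})
	defer close(doneCh)

	for info := range c.ListObjects("my-bucketname", "my-prefix/", true, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		fmt.Println(info.Key, info.Size)
	}
}
```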


@@ -33,19 +33,19 @@ var supportedGetReqParams = map[string]struct{}{
 // presignURL - Returns a presigned URL for an input 'method'.
 // Expires maximum is 7days - ie. 604800 and minimum is 1.
-func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (urlStr string, err error) {
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
 	// Input validation.
 	if method == "" {
-		return "", ErrInvalidArgument("method cannot be empty.")
+		return nil, ErrInvalidArgument("method cannot be empty.")
 	}
 	if err := isValidBucketName(bucketName); err != nil {
-		return "", err
+		return nil, err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return "", err
+		return nil, err
 	}
 	if err := isValidExpiry(expires); err != nil {
-		return "", err
+		return nil, err
 	}
 	// Convert expires into seconds.
@@ -63,7 +63,7 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
 	// Verify if input map has unsupported params, if yes exit.
 	for k := range reqParams {
 		if _, ok := supportedGetReqParams[k]; !ok {
-			return "", ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
+			return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
 		}
 	}
 	// Save the request parameters to be used in presigning for
@@ -75,43 +75,48 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
 	// Since expires is set newRequest will presign the request.
 	req, err := c.newRequest(method, reqMetadata)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
-	return req.URL.String(), nil
+	return req.URL, nil
 }
 // PresignedGetObject - Returns a presigned URL to access an object
 // without credentials. Expires maximum is 7days - ie. 604800 and
 // minimum is 1. Additionally you can override a set of response
 // headers using the query parameters.
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (url string, err error) {
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
 	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
 }
 // PresignedPutObject - Returns a presigned URL to upload an object without credentials.
 // Expires maximum is 7days - ie. 604800 and minimum is 1.
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) {
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
 	return c.presignURL("PUT", bucketName, objectName, expires, nil)
 }
-// PresignedPostPolicy - Returns POST form data to upload an object at a location.
-func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
+// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
 	// Validate input arguments.
 	if p.expiration.IsZero() {
-		return nil, errors.New("Expiration time must be specified")
+		return nil, nil, errors.New("Expiration time must be specified")
 	}
 	if _, ok := p.formData["key"]; !ok {
-		return nil, errors.New("object key must be specified")
+		return nil, nil, errors.New("object key must be specified")
 	}
 	if _, ok := p.formData["bucket"]; !ok {
-		return nil, errors.New("bucket name must be specified")
+		return nil, nil, errors.New("bucket name must be specified")
 	}
 	bucketName := p.formData["bucket"]
 	// Fetch the bucket location.
 	location, err := c.getBucketLocation(bucketName)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
+	}
+	u, err = c.makeTargetURL(bucketName, "", location, nil)
+	if err != nil {
+		return nil, nil, err
 	}
 	// Keep time.
@@ -129,7 +134,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 	}
 	// Sign the policy.
 	p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
-	return p.formData, nil
+	return u, p.formData, nil
 }
 // Add date policy.
@@ -138,7 +143,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 		condition: "$x-amz-date",
 		value:     t.Format(iso8601DateFormat),
 	}); err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	// Add algorithm policy.
@@ -147,7 +152,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 		condition: "$x-amz-algorithm",
 		value:     signV4Algorithm,
 	}); err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	// Add a credential policy.
@@ -157,7 +162,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 		condition: "$x-amz-credential",
 		value:     credential,
 	}); err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	// Get base64 encoded policy.
@@ -168,5 +173,5 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 	p.formData["x-amz-credential"] = credential
 	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
 	p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
-	return p.formData, nil
+	return u, p.formData, nil
 }
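With the extra return value, callers receive the POST target URL (derived from the bucket location) together with the form fields, instead of hard-coding the endpoint. A hedged sketch of the new call shape; the bucket, key, expiry, and credentials are placeholders, and the PostPolicy setters are used as documented elsewhere in this revision:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucketname")
	policy.SetKey("my-objectname")
	policy.SetExpires(time.Now().UTC().Add(24 * time.Hour))

	// PresignedPostPolicy now also returns the upload URL, so the generated
	// curl command no longer hard-codes the host.
	url, formData, err := s3Client.PresignedPostPolicy(policy)
	if err != nil {
		log.Fatalln(err)
	}

	fmt.Printf("curl ")
	for k, v := range formData {
		fmt.Printf("-F %s=%s ", k, v)
	}
	fmt.Printf("-F file=@/etc/bash.bashrc ")
	fmt.Printf("%s\n", url)
}
```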


@@ -26,6 +26,7 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/url"
+	"reflect"
 )
 /// Bucket operations
@@ -166,14 +167,22 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
 		return nil
 	}
 	// Remove any previous policies at this path.
-	policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
+	statements := removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
 	// generating []Statement for the given bucketPolicy.
-	statements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
+	generatedStatements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
 	if err != nil {
 		return err
 	}
-	policy.Statements = append(policy.Statements, statements...)
+	statements = append(statements, generatedStatements...)
+	// No change in the statements indicates an attempt of setting 'none' on a prefix
+	// which doesn't have a pre-existing policy.
+	if reflect.DeepEqual(policy.Statements, statements) {
+		return ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
+	}
+	policy.Statements = statements
 	// Save the updated policies.
 	return c.putBucketPolicy(bucketName, policy)
 }
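For callers this means clearing a policy on a prefix that never had one now fails loudly instead of silently rewriting an unchanged policy document. A hedged sketch; the `BucketPolicyNone` constant is an assumption about the "none" value this SetBucketPolicy variant accepts, and the names are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Assumed constant: asking to clear ("none") the policy on a prefix with
	// no pre-existing policy now returns a NoSuchBucketPolicy error response.
	err = c.SetBucketPolicy("my-bucketname", "unknown-prefix/", minio.BucketPolicyNone)
	if err != nil {
		log.Println("no policy to remove:", err)
	}
}
```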


@@ -42,7 +42,7 @@ import (
 // is where each part is re-downloaded, checksummed and verified
 // before upload.
 func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
-	if size > 0 && size >= minPartSize {
+	if size > 0 && size > minPartSize {
 		// Verify if reader is *os.File, then use file system functionalities.
 		if isFile(reader) {
 			return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)


@@ -27,78 +27,94 @@ import (
 	"strings"
 )
+// toInt - converts go value to its integer representation based
+// on the value kind if it is an integer.
+func toInt(value reflect.Value) (size int64) {
+	size = -1
+	if value.IsValid() {
+		switch value.Kind() {
+		case reflect.Int:
+			fallthrough
+		case reflect.Int8:
+			fallthrough
+		case reflect.Int16:
+			fallthrough
+		case reflect.Int32:
+			fallthrough
+		case reflect.Int64:
+			size = value.Int()
+		}
+	}
+	return size
+}
 // getReaderSize - Determine the size of Reader if available.
 func getReaderSize(reader io.Reader) (size int64, err error) {
-	var result []reflect.Value
 	size = -1
-	if reader != nil {
-		// Verify if there is a method by name 'Size'.
-		lenFn := reflect.ValueOf(reader).MethodByName("Size")
-		if lenFn.IsValid() {
-			if lenFn.Kind() == reflect.Func {
-				// Call the 'Size' function and save its return value.
-				result = lenFn.Call([]reflect.Value{})
-				if len(result) == 1 {
-					lenValue := result[0]
-					if lenValue.IsValid() {
-						switch lenValue.Kind() {
-						case reflect.Int:
-							fallthrough
-						case reflect.Int8:
-							fallthrough
-						case reflect.Int16:
-							fallthrough
-						case reflect.Int32:
-							fallthrough
-						case reflect.Int64:
-							size = lenValue.Int()
-						}
-					}
-				}
-			}
-		} else {
-			// Fallback to Stat() method, two possible Stat() structs
-			// exist.
-			switch v := reader.(type) {
-			case *os.File:
-				var st os.FileInfo
-				st, err = v.Stat()
-				if err != nil {
-					// Handle this case specially for "windows",
-					// certain files for example 'Stdin', 'Stdout' and
-					// 'Stderr' it is not allowed to fetch file information.
-					if runtime.GOOS == "windows" {
-						if strings.Contains(err.Error(), "GetFileInformationByHandle") {
-							return -1, nil
-						}
-					}
-					return
-				}
-				// Ignore if input is a directory, throw an error.
-				if st.Mode().IsDir() {
-					return -1, ErrInvalidArgument("Input file cannot be a directory.")
-				}
-				// Ignore 'Stdin', 'Stdout' and 'Stderr', since they
-				// represent *os.File type but internally do not
-				// implement Seekable calls. Ignore them and treat
-				// them like a stream with unknown length.
-				switch st.Name() {
-				case "stdin":
-					fallthrough
-				case "stdout":
-					fallthrough
-				case "stderr":
-					return
-				}
-				size = st.Size()
-			case *Object:
-				var st ObjectInfo
-				st, err = v.Stat()
-				if err != nil {
-					return
-				}
-				size = st.Size
-			}
-		}
-	}
+	if reader == nil {
+		return -1, nil
+	}
+	// Verify if there is a method by name 'Size'.
+	sizeFn := reflect.ValueOf(reader).MethodByName("Size")
+	// Verify if there is a method by name 'Len'.
+	lenFn := reflect.ValueOf(reader).MethodByName("Len")
+	if sizeFn.IsValid() {
+		if sizeFn.Kind() == reflect.Func {
+			// Call the 'Size' function and save its return value.
+			result := sizeFn.Call([]reflect.Value{})
+			if len(result) == 1 {
+				size = toInt(result[0])
+			}
+		}
+	} else if lenFn.IsValid() {
+		if lenFn.Kind() == reflect.Func {
+			// Call the 'Len' function and save its return value.
+			result := lenFn.Call([]reflect.Value{})
+			if len(result) == 1 {
+				size = toInt(result[0])
+			}
+		}
+	} else {
+		// Fallback to Stat() method, two possible Stat() structs exist.
+		switch v := reader.(type) {
+		case *os.File:
+			var st os.FileInfo
+			st, err = v.Stat()
+			if err != nil {
+				// Handle this case specially for "windows",
+				// certain files for example 'Stdin', 'Stdout' and
+				// 'Stderr' it is not allowed to fetch file information.
+				if runtime.GOOS == "windows" {
+					if strings.Contains(err.Error(), "GetFileInformationByHandle") {
+						return -1, nil
+					}
+				}
+				return
+			}
+			// Ignore if input is a directory, throw an error.
+			if st.Mode().IsDir() {
+				return -1, ErrInvalidArgument("Input file cannot be a directory.")
+			}
+			// Ignore 'Stdin', 'Stdout' and 'Stderr', since they
+			// represent *os.File type but internally do not
+			// implement Seekable calls. Ignore them and treat
+			// them like a stream with unknown length.
+			switch st.Name() {
+			case "stdin":
+				fallthrough
+			case "stdout":
+				fallthrough
+			case "stderr":
+				return
+			}
+			size = st.Size()
+		case *Object:
+			var st ObjectInfo
+			st, err = v.Stat()
+			if err != nil {
+				return
+			}
+			size = st.Size
+		}
+	}
 	// Returns the size here.
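The practical effect is that readers exposing a Len method (for example bytes.Buffer) now report their size, so uploads from in-memory buffers no longer fall back to the unknown-length streaming path. A hedged sketch, assuming the PutObject signature of this minio-go revision and placeholder names:

```go
package main

import (
	"bytes"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// bytes.Buffer has no Size method, but it does have Len, which the
	// reworked getReaderSize now picks up via reflection.
	payload := bytes.NewBufferString("hello, restic")

	n, err := c.PutObject("my-bucketname", "my-objectname", payload, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)
}
```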


@@ -84,8 +84,8 @@ const (
 // NewV2 - instantiate minio client with Amazon S3 signature version
 // '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
-	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
 	if err != nil {
 		return nil, err
 	}
@@ -96,8 +96,8 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
 // NewV4 - instantiate minio client with Amazon S3 signature version
 // '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
-	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
 	if err != nil {
 		return nil, err
 	}
@@ -108,8 +108,8 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
 // New - instantiate minio client Client, adds automatic verification
 // of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
-	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+	clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure)
 	if err != nil {
 		return nil, err
 	}
@@ -148,9 +148,9 @@ func (r *lockedRandSource) Seed(seed int64) {
 	r.lk.Unlock()
 }
-func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
+func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
 	// construct endpoint.
-	endpointURL, err := getEndpointURL(endpoint, insecure)
+	endpointURL, err := getEndpointURL(endpoint, secure)
 	if err != nil {
 		return nil, err
 	}
@@ -168,10 +168,6 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
 	// Instantiate http client and bucket location cache.
 	clnt.httpClient = &http.Client{
-		// Setting a sensible time out of 2minutes to wait for response
-		// headers. Request is pro-actively cancelled after 2minutes
-		// if no response was received from server.
-		Timeout:   2 * time.Minute,
 		Transport: http.DefaultTransport,
 	}
@@ -220,13 +216,6 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
 	}
 }
-// SetClientTimeout - set http client timeout.
-func (c *Client) SetClientTimeout(timeout time.Duration) {
-	if c.httpClient != nil {
-		c.httpClient.Timeout = timeout
-	}
-}
 // TraceOn - enable HTTP tracing.
 func (c *Client) TraceOn(outputStream io.Writer) {
 	// if outputStream is nil then default to os.Stdout.
@@ -568,10 +557,15 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 		req.Body = ioutil.NopCloser(metadata.contentBody)
 	}
-	// set 'Expect' header for the request.
-	req.Header.Set("Expect", "100-continue")
+	// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
+	// Skip setting 'expect' header for Google Cloud Storage, there
+	// are some known issues - https://github.com/restic/restic/issues/520
+	if !isGoogleEndpoint(c.endpointURL) {
+		// Set 'Expect' header for the request.
+		req.Header.Set("Expect", "100-continue")
+	}
-	// set 'User-Agent' header for the request.
+	// Set 'User-Agent' header for the request.
 	c.setUserAgent(req)
 	// Set all headers.
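With the hard-wired 2-minute client timeout and SetClientTimeout removed, callers that still want bounded waits can inject their own transport through SetCustomTransport, which this revision keeps. A hedged sketch with placeholder credentials:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Bound only the wait for response headers rather than the whole request,
	// so long-running uploads and downloads are no longer cut off after two minutes.
	c.SetCustomTransport(&http.Transport{
		ResponseHeaderTimeout: 2 * time.Minute,
	})
}
```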


@@ -46,7 +46,7 @@ func TestMakeBucketErrorV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -92,7 +92,7 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -177,7 +177,7 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -244,7 +244,7 @@ func TestResumablePutObjectV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -355,7 +355,7 @@ func TestFPutObjectV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -503,7 +503,7 @@ func TestResumableFPutObjectV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -580,7 +580,7 @@ func TestMakeBucketRegionsV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -631,7 +631,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -769,7 +769,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -910,7 +910,7 @@ func TestCopyObjectV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -996,10 +996,6 @@ func TestCopyObjectV2(t *testing.T) {
 		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
 			objInfo.Size, objInfoCopy.Size)
 	}
-	if objInfo.ETag != objInfoCopy.ETag {
-		t.Fatalf("Error: ETags do not match, want %v, got %v\n",
-			objInfoCopy.ETag, objInfo.ETag)
-	}
 	// Remove all objects and buckets
 	err = c.RemoveObject(bucketName, objectName)
@@ -1036,7 +1032,7 @@ func TestFunctionalV2(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -1184,7 +1180,7 @@ func TestFunctionalV2(t *testing.T) {
 		t.Fatal("Error: ", err)
 	}
 	// Verify if presigned url works.
-	resp, err := http.Get(presignedGetURL)
+	resp, err := http.Get(presignedGetURL.String())
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
@@ -1208,7 +1204,7 @@ func TestFunctionalV2(t *testing.T) {
 		t.Fatal("Error: ", err)
 	}
 	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL)
+	resp, err = http.Get(presignedGetURL.String())
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
@@ -1236,7 +1232,7 @@ func TestFunctionalV2(t *testing.T) {
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
-	req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf))
+	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}


@@ -70,7 +70,7 @@ func TestMakeBucketError(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -116,7 +116,7 @@ func TestMakeBucketRegions(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -167,7 +167,7 @@ func TestGetObjectClosedTwice(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -252,7 +252,7 @@ func TestRemovePartiallyUploaded(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -322,7 +322,7 @@ func TestResumablePutObject(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -432,7 +432,7 @@ func TestResumableFPutObject(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -512,7 +512,7 @@ func TestFPutObject(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -660,7 +660,7 @@ func TestGetObjectReadSeekFunctional(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -798,7 +798,7 @@ func TestGetObjectReadAtFunctional(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -939,7 +939,7 @@ func TestCopyObject(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -1025,10 +1025,6 @@ func TestCopyObject(t *testing.T) {
 		t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
 			objInfo.Size, objInfoCopy.Size)
 	}
-	if objInfo.ETag != objInfoCopy.ETag {
-		t.Fatalf("Error: ETags do not match, want %v, got %v\n",
-			objInfoCopy.ETag, objInfo.ETag)
-	}
 	// Remove all objects and buckets
 	err = c.RemoveObject(bucketName, objectName)
@@ -1065,7 +1061,7 @@ func TestFunctional(t *testing.T) {
 		"s3.amazonaws.com",
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		false,
+		true,
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -1256,7 +1252,7 @@ func TestFunctional(t *testing.T) {
 	}
 	// Verify if presigned url works.
-	resp, err := http.Get(presignedGetURL)
+	resp, err := http.Get(presignedGetURL.String())
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
@@ -1279,7 +1275,7 @@ func TestFunctional(t *testing.T) {
 		t.Fatal("Error: ", err)
 	}
 	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL)
+	resp, err = http.Get(presignedGetURL.String())
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
@@ -1306,7 +1302,7 @@ func TestFunctional(t *testing.T) {
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
-	req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf))
+	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}


@@ -20,6 +20,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"sort"
+	"strings"
 )
 // maximum supported access policy size.
@@ -149,7 +150,7 @@ func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPr
 				commonActions = true
 				continue
 			}
-		} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+		} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
 			if subsetActions(readWriteObjectActions, statement.Actions) {
 				readWrite = true
 			}
@@ -171,7 +172,7 @@ func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPr
 				commonActions = true
 				continue
 			}
-		} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+		} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
 			if subsetActions(writeOnlyObjectActions, statement.Actions) {
 				writeOnly = true
 			}
@@ -193,7 +194,7 @@ func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPre
 				commonActions = true
 				continue
 			}
-		} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+		} else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
 			if subsetActions(readOnlyObjectActions, statement.Actions) {
 				readOnly = true
 				break
@@ -207,9 +208,10 @@ func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPre
 // Removes read write bucket policy if found.
 func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
 	var newStatements []Statement
+	var bucketResourceStatementRemoved bool
 	for _, statement := range statements {
 		for _, resource := range statement.Resources {
-			if resource == awsResourcePrefix+bucketName {
+			if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved {
 				var newActions []string
 				for _, action := range statement.Actions {
 					switch action {
@@ -219,6 +221,7 @@ func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName str
 					newActions = append(newActions, action)
 				}
 				statement.Actions = newActions
+				bucketResourceStatementRemoved = true
 			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
 				var newActions []string
 				for _, action := range statement.Actions {
@@ -241,9 +244,10 @@ func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName str
 // Removes write only bucket policy if found.
 func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
 	var newStatements []Statement
+	var bucketResourceStatementRemoved bool
 	for _, statement := range statements {
 		for _, resource := range statement.Resources {
-			if resource == awsResourcePrefix+bucketName {
+			if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved {
 				var newActions []string
 				for _, action := range statement.Actions {
 					switch action {
@@ -253,6 +257,7 @@ func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName str
 					newActions = append(newActions, action)
 				}
 				statement.Actions = newActions
+				bucketResourceStatementRemoved = true
 			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
 				var newActions []string
 				for _, action := range statement.Actions {
@@ -275,9 +280,10 @@ func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName str
 // Removes read only bucket policy if found.
 func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
 	var newStatements []Statement
+	var bucketResourceStatementRemoved bool
 	for _, statement := range statements {
 		for _, resource := range statement.Resources {
-			if resource == awsResourcePrefix+bucketName {
+			if resource == awsResourcePrefix+bucketName && !bucketResourceStatementRemoved {
 				var newActions []string
 				for _, action := range statement.Actions {
 					switch action {
@@ -287,6 +293,7 @@ func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName stri
 					newActions = append(newActions, action)
 				}
 				statement.Actions = newActions
+				bucketResourceStatementRemoved = true
 			} else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
 				var newActions []string
 				for _, action := range statement.Actions {
@@ -307,17 +314,32 @@ func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName stri
 // Remove bucket policies based on the type.
 func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
-	// Verify type of policy to be removed.
-	if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
-		statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
-	} else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
-		statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
-	} else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
-		statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
+	// Verify that a policy is defined on the object prefix, otherwise do not remove the policy
+	if isPolicyDefinedForObjectPrefix(statements, bucketName, objectPrefix) {
+		// Verify type of policy to be removed.
+		if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
+			statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
+		} else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
+			statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
+		} else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
+			statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
+		}
 	}
 	return statements
 }
+// Checks if an access policy is defined for the given object prefix
+func isPolicyDefinedForObjectPrefix(statements []Statement, bucketName string, objectPrefix string) bool {
+	for _, statement := range statements {
+		for _, resource := range statement.Resources {
+			if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
+				return true
+			}
+		}
+	}
+	return false
+}
 // Unmarshals bucket policy byte array into a structured bucket access policy.
 func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
 	// Untyped lazy JSON struct.
@@ -486,3 +508,30 @@ func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
 	statements = append(statements, bucketResourceStatement, objectResourceStatement)
 	return statements
 }
+// Match function matches wild cards in 'pattern' for resource.
+func resourceMatch(pattern, resource string) bool {
+	if pattern == "" {
+		return resource == pattern
+	}
+	if pattern == "*" {
+		return true
+	}
+	parts := strings.Split(pattern, "*")
+	if len(parts) == 1 {
+		return resource == pattern
+	}
+	tGlob := strings.HasSuffix(pattern, "*")
+	end := len(parts) - 1
+	if !strings.HasPrefix(resource, parts[0]) {
+		return false
+	}
+	for i := 1; i < end; i++ {
+		if !strings.Contains(resource, parts[i]) {
+			return false
+		}
+		idx := strings.Index(resource, parts[i]) + len(parts[i])
+		resource = resource[idx:]
+	}
+	return tGlob || strings.HasSuffix(resource, parts[end])
+}


@ -361,22 +361,52 @@ func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
} }
} }
// Tests validate removal of policy statement from the list of statements. // Tests validate whether access policy is defined for the given object prefix
func TestRemoveBucketPolicyStatement(t *testing.T) { func TestIsPolicyDefinedForObjectPrefix(t *testing.T) {
testCases := []struct { testCases := []struct {
bucketName string bucketName string
objectPrefix string objectPrefix string
inputStatements []Statement inputStatements []Statement
expectedResult bool
}{ }{
{"my-bucket", "", []Statement{}}, {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/"), true},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", "")}, {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "ab/"), false},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", "")}, {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abcde"), false},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", "")}, {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/de"), false},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
{"bucket", "", setReadOnlyStatement("bucket", "abc/"), false},
}
for i, testCase := range testCases {
actualResult := isPolicyDefinedForObjectPrefix(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
if actualResult != testCase.expectedResult {
t.Errorf("Test %d: Expected isPolicyDefinedForObjectPrefix to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
}
}
}
// Tests validate removal of policy statement from the list of statements.
func TestRemoveBucketPolicyStatement(t *testing.T) {
var emptyStatement []Statement
testCases := []struct {
bucketName string
objectPrefix string
inputStatements []Statement
expectedStatements []Statement
}{
{"my-bucket", "", nil, emptyStatement},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
{"my-bucket", "abcd", setReadOnlyStatement("my-bucket", "abc"), setReadOnlyStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "abc/")},
{"my-bucket", "abcd", setWriteOnlyStatement("my-bucket", "abc"), setWriteOnlyStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "abc/")},
{"my-bucket", "abcd", setReadWriteStatement("my-bucket", "abc"), setReadWriteStatement("my-bucket", "abc")},
{"my-bucket", "abc/de", setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "abc/")},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement(). if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
if len(actualStatements) != 0 {
t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1) t.Errorf("Test %d: The expected statements from resource statement generator doesn't match the actual statements", i+1)
} }
} }
@ -393,10 +423,11 @@ func TestRemoveBucketPolicyStatementReadOnly(t *testing.T) {
}{ }{
{"my-bucket", "", []Statement{}, emptyStatement}, {"my-bucket", "", []Statement{}, emptyStatement},
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement}, {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
{"read-only-bucket", "abc/", setReadOnlyStatement("read-only-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "def/")...), setReadOnlyStatement("my-bucket", "def/")},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) { if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1) t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
} }
@ -414,10 +445,11 @@ func TestRemoveBucketPolicyStatementWriteOnly(t *testing.T) {
}{ }{
{"my-bucket", "", []Statement{}, emptyStatement}, {"my-bucket", "", []Statement{}, emptyStatement},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement}, {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
{"write-only-bucket", "abc/", setWriteOnlyStatement("write-only-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "def/")...), setWriteOnlyStatement("my-bucket", "def/")},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) { if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1) t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
} }
@ -435,16 +467,73 @@ func TestRemoveBucketPolicyStatementReadWrite(t *testing.T) {
}{ }{
{"my-bucket", "", []Statement{}, emptyStatement}, {"my-bucket", "", []Statement{}, emptyStatement},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement}, {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
{"read-write-bucket", "abc/", setReadWriteStatement("read-write-bucket", "abc/"), emptyStatement},
{"my-bucket", "abc/", append(setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "def/")...), setReadWriteStatement("my-bucket", "def/")},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) { if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1) t.Errorf("Test %d: Expected policy statements doesn't match the actual one", i+1)
} }
} }
} }
// Tests validate Bucket policy resource matcher.
func TestBucketPolicyResourceMatch(t *testing.T) {
// generates\ statement with given resource..
generateStatement := func(resource string) Statement {
statement := Statement{}
statement.Resources = []string{resource}
return statement
}
// generates resource prefix.
generateResource := func(bucketName, objectName string) string {
return awsResourcePrefix + bucketName + "/" + objectName
}
testCases := []struct {
resourceToMatch string
statement Statement
expectedResourceMatch bool
}{
// Test case 1-4.
// Policy with resource ending with bucket/* allows access to all objects inside the given bucket.
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
{generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
// Test case - 5.
// Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt.
{generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), false},
// Test case - 6.
// Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt.
{generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
// Test case - 7.
// Policy with resource ending with bucket/oo* allows access to all subfolders starting with "oo" inside given bucket.
{generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
// Test case - 8.
{generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
// Test case - 9.
{generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
// Test case - 10.
// Proves that the name space is flat.
{generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
"minio-bucket"+"/*/India/*/Bihar")), true},
// Test case - 11.
// Proves that the name space is flat.
{generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
"minio-bucket"+"/*/India/*/Bihar/*")), true},
}
for i, testCase := range testCases {
actualResourceMatch := resourceMatch(testCase.statement.Resources[0], testCase.resourceToMatch)
if testCase.expectedResourceMatch != actualResourceMatch {
t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch)
}
}
}
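The cases above pin down the matching semantics: a `*` in the policy resource also crosses `/`, which is why the object namespace behaves as flat in test cases 10 and 11. As a rough illustration of that behaviour only (the `wildcardMatch` helper below is hypothetical, not minio-go's actual `resourceMatch`):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// wildcardMatch treats every "*" in the pattern as "match anything, including /".
func wildcardMatch(pattern, resource string) bool {
	parts := strings.Split(pattern, "*")
	for i, p := range parts {
		parts[i] = regexp.QuoteMeta(p) // keep the literal parts literal
	}
	re := regexp.MustCompile("^" + strings.Join(parts, ".*") + "$")
	return re.MatchString(resource)
}

func main() {
	const prefix = "arn:aws:s3:::"
	fmt.Println(wildcardMatch(prefix+"minio-bucket/oo*", prefix+"minio-bucket/ootput.txt")) // true
	fmt.Println(wildcardMatch(prefix+"minio-bucket/oo*", prefix+"minio-bucket/output.txt")) // false
	fmt.Println(wildcardMatch(prefix+"minio-bucket/*/India/*/Bihar",
		prefix+"minio-bucket/Africa/Bihar/India/design_info.doc/Bihar")) // true, the namespace is flat
}
```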
// Tests validate whether the bucket policy is read only. // Tests validate whether the bucket policy is read only.
func TestIsBucketPolicyReadOnly(t *testing.T) { func TestIsBucketPolicyReadOnly(t *testing.T) {
testCases := []struct { testCases := []struct {
@@ -458,10 +547,14 @@ func TestIsBucketPolicyReadOnly(t *testing.T) {
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true}, {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false}, {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true}, {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setReadOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setReadOnlyStatement("my-bucket", "abc/"), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), false},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if testCase.expectedResult != actualResult { if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult) t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
} }
@@ -481,10 +574,13 @@ func TestIsBucketPolicyReadWrite(t *testing.T) {
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false}, {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false}, {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true}, {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setReadWriteStatement("my-bucket", ""), true},
{"my-bucket", "abc", setReadWriteStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setReadWriteStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setReadWriteStatement("my-bucket", "abc/"), true},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if testCase.expectedResult != actualResult { if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult) t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
} }
@@ -504,10 +600,14 @@ func TestIsBucketPolicyWriteOnly(t *testing.T) {
{"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false}, {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
{"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true}, {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true},
{"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true}, {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), true},
{"my-bucket", "abc", setWriteOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abcde", setWriteOnlyStatement("my-bucket", "abc"), true},
{"my-bucket", "abc/d", setWriteOnlyStatement("my-bucket", "abc/"), true},
{"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), false},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix) actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
// empty statement is expected after the invocation of removeBucketPolicyStatement().
if testCase.expectedResult != actualResult { if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult) t.Errorf("Test %d: Expected isBucketPolicyReadonly to '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
} }
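The added prefix cases exercise how a set of statements is classified as read-only, write-only or read-write for a given bucket and object prefix. For orientation only, a hedged sketch of what such statements look like as plain S3 policy JSON; the field names and action lists below are illustrative (the library's canned policies carry additional actions), and the struct is not minio-go's internal `Statement` type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// statementSketch mirrors the generic S3 bucket-policy JSON shape for this example only.
type statementSketch struct {
	Effect    string              `json:"Effect"`
	Principal map[string][]string `json:"Principal"`
	Action    []string            `json:"Action"`
	Resource  []string            `json:"Resource"`
}

func main() {
	anon := map[string][]string{"AWS": {"*"}}
	// Read-only for my-bucket/abc*: bucket-level reads plus object GETs under the prefix.
	readOnly := []statementSketch{
		{Effect: "Allow", Principal: anon, Action: []string{"s3:GetBucketLocation", "s3:ListBucket"}, Resource: []string{"arn:aws:s3:::my-bucket"}},
		{Effect: "Allow", Principal: anon, Action: []string{"s3:GetObject"}, Resource: []string{"arn:aws:s3:::my-bucket/abc*"}},
	}
	// Write-only for the same prefix: object PUTs, no reads or listing.
	writeOnly := []statementSketch{
		{Effect: "Allow", Principal: anon, Action: []string{"s3:PutObject"}, Resource: []string{"arn:aws:s3:::my-bucket/abc*"}},
	}
	out, _ := json.MarshalIndent(map[string][]statementSketch{"ReadOnly": readOnly, "WriteOnly": writeOnly}, "", "  ")
	fmt.Println(string(out))
}
```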

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
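Each of the example programs in this update changes the same way: the final boolean passed to minio.New now means "use HTTPS" rather than "use plain HTTP". A hedged migration sketch for call sites that still hold an insecure-style flag (the endpoint and credentials are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholders; replace with a real endpoint and credentials.
	endpoint, accessKey, secretKey := "s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY"

	// Previously the last argument meant "insecure" (true => plain HTTP).
	// It now means "secure" (true => HTTPS), so an old flag must be inverted.
	insecure := false
	s3Client, err := minio.New(endpoint, accessKey, secretKey, !insecure)
	if err != nil {
		log.Fatalln(err)
	}
	_ = s3Client
}
```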

View file

@@ -29,12 +29,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values. // my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values. // and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
// and my-filename.csv are dummy values, please replace them with original values. // and my-filename.csv are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -30,12 +30,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
// my-testfile are dummy values, please replace them with original values. // my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are // Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -29,12 +29,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
return return

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
fmt.Println(err) fmt.Println(err)
return return

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -30,12 +30,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -30,12 +30,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
@@ -43,15 +43,17 @@ func main() {
policy := minio.NewPostPolicy() policy := minio.NewPostPolicy()
policy.SetBucket("my-bucketname") policy.SetBucket("my-bucketname")
policy.SetKey("my-objectname") policy.SetKey("my-objectname")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days // Expires in 10 days.
m, err := s3Client.PresignedPostPolicy(policy) policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10))
// Returns form data for POST form request.
url, formData, err := s3Client.PresignedPostPolicy(policy)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
fmt.Printf("curl ") fmt.Printf("curl ")
for k, v := range m { for k, v := range formData {
fmt.Printf("-F %s=%s ", k, v) fmt.Printf("-F %s=%s ", k, v)
} }
fmt.Printf("-F file=@/etc/bash.bashrc ") fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("https://my-bucketname.s3.amazonaws.com\n") fmt.Printf("%s\n", url)
} }
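The example now prints a curl command assembled from the returned URL and form fields. As a hedged sketch (the `uploadWithPostPolicy` helper below is illustrative, not part of minio-go), the same POST can also be issued directly from Go; the policy fields are written into the multipart body before the file part:

```go
package main

import (
	"bytes"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
)

// uploadWithPostPolicy submits the presigned POST form returned by PresignedPostPolicy.
func uploadWithPostPolicy(target *url.URL, formData map[string]string, filePath string) error {
	var body bytes.Buffer
	writer := multipart.NewWriter(&body)

	// Policy fields first, the file part last.
	for k, v := range formData {
		if err := writer.WriteField(k, v); err != nil {
			return err
		}
	}
	part, err := writer.CreateFormFile("file", filepath.Base(filePath))
	if err != nil {
		return err
	}
	f, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := io.Copy(part, f); err != nil {
		return err
	}
	if err := writer.Close(); err != nil {
		return err
	}

	resp, err := http.Post(target.String(), writer.FormDataContentType(), &body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil // S3 typically answers 204 No Content on success.
}

func main() {
	// Obtain url and formData from s3Client.PresignedPostPolicy(policy) as in the
	// example above, then:
	// err := uploadWithPostPolicy(url, formData, "/etc/bash.bashrc")
}
```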

View file

@@ -29,12 +29,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -29,12 +29,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values. // my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -29,12 +29,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values. // my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values. // dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -28,12 +28,12 @@ func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values. // are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New(). // This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -63,7 +63,7 @@ func sumHMAC(key []byte, data []byte) []byte {
} }
// getEndpointURL - construct a new endpoint. // getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) { func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") { if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint) host, _, err := net.SplitHostPort(endpoint)
if err != nil { if err != nil {
@@ -79,9 +79,9 @@ func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
return nil, ErrInvalidArgument(msg) return nil, ErrInvalidArgument(msg)
} }
} }
// if inSecure is true, use 'http' scheme. // If secure is false, use 'http' scheme.
scheme := "https" scheme := "https"
if inSecure { if !secure {
scheme = "http" scheme = "http"
} }
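With the rename, the flag reads positively: secure selects https, !secure selects http. A minimal sketch of just that mapping (it deliberately skips the host and endpoint validation the real getEndpointURL performs):

```go
package main

import (
	"fmt"
	"net/url"
)

// endpointURLSketch shows only the scheme selection; no endpoint validation.
func endpointURLSketch(endpoint string, secure bool) (*url.URL, error) {
	scheme := "https"
	if !secure {
		scheme = "http"
	}
	return url.Parse(scheme + "://" + endpoint)
}

func main() {
	u, _ := endpointURLSketch("192.168.1.1:9000", false)
	fmt.Println(u) // http://192.168.1.1:9000
	u, _ = endpointURLSketch("s3.amazonaws.com", true)
	fmt.Println(u) // https://s3.amazonaws.com
}
```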

View file

@@ -27,7 +27,7 @@ func TestGetEndpointURL(t *testing.T) {
testCases := []struct { testCases := []struct {
// Inputs. // Inputs.
endPoint string endPoint string
inSecure bool secure bool
// Expected result. // Expected result.
result string result string
@@ -35,23 +35,23 @@ func TestGetEndpointURL(t *testing.T) {
// Flag indicating whether the test is expected to pass or not. // Flag indicating whether the test is expected to pass or not.
shouldPass bool shouldPass bool
}{ }{
{"s3.amazonaws.com", false, "https://s3.amazonaws.com", nil, true}, {"s3.amazonaws.com", true, "https://s3.amazonaws.com", nil, true},
{"s3.cn-north-1.amazonaws.com.cn", false, "https://s3.cn-north-1.amazonaws.com.cn", nil, true}, {"s3.cn-north-1.amazonaws.com.cn", true, "https://s3.cn-north-1.amazonaws.com.cn", nil, true},
{"s3.amazonaws.com", true, "http://s3.amazonaws.com", nil, true}, {"s3.amazonaws.com", false, "http://s3.amazonaws.com", nil, true},
{"s3.cn-north-1.amazonaws.com.cn", true, "http://s3.cn-north-1.amazonaws.com.cn", nil, true}, {"s3.cn-north-1.amazonaws.com.cn", false, "http://s3.cn-north-1.amazonaws.com.cn", nil, true},
{"192.168.1.1:9000", true, "http://192.168.1.1:9000", nil, true}, {"192.168.1.1:9000", false, "http://192.168.1.1:9000", nil, true},
{"192.168.1.1:9000", false, "https://192.168.1.1:9000", nil, true}, {"192.168.1.1:9000", true, "https://192.168.1.1:9000", nil, true},
{"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false}, {"192.168.1.1::9000", false, "", fmt.Errorf("too many colons in address %s", "192.168.1.1::9000"), false},
{"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false}, {"13333.123123.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
{"13333.123123.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false}, {"13333.123123.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "13333.123123.-"), false},
{"s3.amazonaws.com:443", false, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false}, {"s3.amazonaws.com:443", true, "", fmt.Errorf("Amazon S3 endpoint should be 's3.amazonaws.com'."), false},
{"storage.googleapis.com:4000", false, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false}, {"storage.googleapis.com:4000", true, "", fmt.Errorf("Google Cloud Storage endpoint should be 'storage.googleapis.com'."), false},
{"s3.aamzza.-", false, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false}, {"s3.aamzza.-", true, "", fmt.Errorf("Endpoint: %s does not follow ip address or domain name standards.", "s3.aamzza.-"), false},
{"", false, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false}, {"", true, "", fmt.Errorf("Endpoint: does not follow ip address or domain name standards."), false},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
result, err := getEndpointURL(testCase.endPoint, testCase.inSecure) result, err := getEndpointURL(testCase.endPoint, testCase.secure)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error()) t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
} }