forked from TrueCloudLab/restic

Update minio/minio-go to v2.0.4

parent 0befa06cd0, commit 1794bdc663

57 changed files with 2982 additions and 1567 deletions
vendor/manifest (vendored): 4 changes
@@ -28,8 +28,8 @@
 	{
 		"importpath": "github.com/minio/minio-go",
 		"repository": "https://github.com/minio/minio-go",
-		"revision": "b1674741d196d5d79486d7c1645ed6ded902b712",
-		"branch": "master"
+		"revision": "dcaae9ec4d0b0a81d17f22f6d7a186491f6a55ec",
+		"branch": "HEAD"
 	},
 	{
 		"importpath": "github.com/pkg/errors",
vendor/src/github.com/minio/minio-go/README.md (vendored): 50 changes
@@ -1,5 +1,6 @@
-# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
-The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
 
 **Supported cloud storage providers:**
 
@@ -14,22 +15,21 @@ The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compati
 - Ceph Object Gateway
 - Riak CS
 
-This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
 
-This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
 
 
 ## Download from Github
 
 ```sh
-$ go get -u github.com/minio/minio-go
+go get -u github.com/minio/minio-go
 ```
 ## Initialize Minio Client
 
-You need four items to connect to Minio object storage server.
+Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage.
 
 
 | Parameter | Description|
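For context, the four parameters in that table map directly onto `minio.New`. A minimal connect sketch, assuming the public play.minio.io endpoint with its published test credentials:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Endpoint, access key, secret key and the SSL flag are the four
	// required parameters. These are the published play.minio.io test keys.
	client, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("connected:", client != nil)
}
```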
@@ -68,7 +68,7 @@ func main() {
 
 ## Quick Start Example - File Uploader
 
-This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
 
 
 
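The uploader described above reduces to two SDK calls. A minimal sketch, assuming the client from the previous snippet; bucket name, region and file path are illustrative:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// makeAndUpload mirrors the quickstart flow: create the bucket, then
// upload one file into it via FPutObject.
func makeAndUpload(client *minio.Client) error {
	if err := client.MakeBucket("mymusic", "us-east-1"); err != nil {
		return err
	}
	n, err := client.FPutObject("mymusic", "golden-oldies.zip",
		"/tmp/golden-oldies.zip", "application/zip")
	if err != nil {
		return err
	}
	log.Println("uploaded", n, "bytes")
	return nil
}
```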
@@ -132,11 +132,11 @@ func main() {
 
 ```sh
-$ go run file-uploader.go
+go run file-uploader.go
 2016/08/13 17:03:28 Successfully created mymusic
 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
 
-$ mc ls play/mymusic/
+mc ls play/mymusic/
 [2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
 
 ```
@@ -161,6 +161,7 @@ The full API Reference is available here.
 
 * [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
 * [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
 
 ### API Reference : Bucket notification Operations
 
@@ -173,14 +174,15 @@ The full API Reference is available here.
 
 * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
 * [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
-* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
 
 ### API Reference : Object Operations
 
 * [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
 * [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
 * [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
 * [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
 * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
 
 ### API Reference : Presigned Operations
@@ -189,44 +191,52 @@ The full API Reference is available here.
 * [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
 * [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
 
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
 
 ## Full Examples
 
 #### Full Examples : Bucket Operations
 
-* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
-* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
-* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
 * [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
 * [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
 * [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
 
 #### Full Examples : Bucket policy Operations
 
 * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
 * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
 
 #### Full Examples : Bucket notification Operations
 
 * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
 * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
-* [deletebucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
 * [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
 
 #### Full Examples : File Object Operations
 
 * [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
 * [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
-* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
 
 #### Full Examples : Object Operations
 
 * [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
 * [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
-* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
-* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
-* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
 * [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
 
 #### Full Examples : Presigned Operations
 * [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
@@ -235,7 +245,7 @@ The full API Reference is available here.
 
 ## Explore Further
 * [Complete Documentation](https://docs.minio.io)
-* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
 * [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
 
 ## Contribute
 
vendor/src/github.com/minio/minio-go/api-datatypes.go (vendored)

@@ -16,7 +16,10 @@
 
 package minio
 
-import "time"
+import (
+	"net/http"
+	"time"
+)
 
 // BucketInfo container for bucket metadata.
 type BucketInfo struct {
@@ -38,6 +41,10 @@ type ObjectInfo struct {
 	Size        int64  `json:"size"`        // Size in bytes of the object.
 	ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
 
+	// Collection of additional metadata on the object.
+	// eg: x-amz-meta-*, content-encoding etc.
+	Metadata http.Header `json:"metadata"`
+
 	// Owner name.
 	Owner struct {
 		DisplayName string `json:"name"`
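The new `Metadata http.Header` field surfaces user metadata (x-amz-meta-*, content-encoding and the like) on stat and list results. A small sketch of reading it, assuming a configured `*minio.Client`; bucket and object names are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// printMetadata shows the new ObjectInfo.Metadata field populated by
// StatObject: user metadata headers keyed like ordinary http.Header values.
func printMetadata(client *minio.Client) {
	info, err := client.StatObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	for key, values := range info.Metadata {
		log.Printf("%s: %v", key, values)
	}
}
```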
vendor/src/github.com/minio/minio-go/api-error-response.go (vendored)

@@ -149,6 +149,16 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 	return errResp
 }
 
+// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
+func ErrTransferAccelerationBucket(bucketName string) error {
+	msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
+	return ErrorResponse{
+		Code:       "InvalidArgument",
+		Message:    msg,
+		BucketName: bucketName,
+	}
+}
+
 // ErrEntityTooLarge - Input size is larger than supported maximum.
 func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
 	msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
@@ -201,16 +211,6 @@ func ErrInvalidObjectName(message string) error {
 	}
 }
 
-// ErrInvalidParts - Invalid number of parts.
-func ErrInvalidParts(expectedParts, uploadedParts int) error {
-	msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
-	return ErrorResponse{
-		Code:      "InvalidParts",
-		Message:   msg,
-		RequestID: "minio",
-	}
-}
-
 // ErrInvalidObjectPrefix - Invalid object prefix response is
 // similar to object name response.
 var ErrInvalidObjectPrefix = ErrInvalidObjectName
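Callers normally recover these typed errors through the package's `ToErrorResponse` helper rather than a direct type assertion. A sketch, assuming a configured client; the helper is part of this package but not shown in this diff:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// checkErr unwraps the concrete ErrorResponse used above and branches on
// its server-side error code.
func checkErr(client *minio.Client) {
	_, err := client.StatObject("mybucket", "missing-object")
	if errResp := minio.ToErrorResponse(err); errResp.Code != "" {
		log.Printf("code=%s message=%s", errResp.Code, errResp.Message)
	}
}
```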
vendor/src/github.com/minio/minio-go/api-error-response_test.go (vendored)

@@ -249,20 +249,6 @@ func TestErrInvalidObjectName(t *testing.T) {
 	}
 }
 
-// Test validates 'ErrInvalidParts' error response.
-func TestErrInvalidParts(t *testing.T) {
-	msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", 10, 9)
-	expectedResult := ErrorResponse{
-		Code:      "InvalidParts",
-		Message:   msg,
-		RequestID: "minio",
-	}
-	actualResult := ErrInvalidParts(10, 9)
-	if !reflect.DeepEqual(expectedResult, actualResult) {
-		t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
-	}
-}
-
 // Test validates 'ErrInvalidArgument' response.
 func TestErrInvalidArgument(t *testing.T) {
 	expectedResult := ErrorResponse{
vendor/src/github.com/minio/minio-go/api-get-object.go (vendored)

@@ -73,7 +73,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 				if req.isReadAt {
 					// If this is a ReadAt request only get the specified range.
 					// Range is set with respect to the offset and length of the buffer requested.
-					httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+					// Do not set objectInfo from the first readAt request because it will not get
+					// the whole object.
+					httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
 				} else {
 					// First request is a Read request.
 					httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
@@ -115,6 +117,19 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 						objectInfo: objectInfo,
 					}
 				}
+			} else if req.settingObjectInfo { // Request is just to get objectInfo.
+				objectInfo, err := c.StatObject(bucketName, objectName)
+				if err != nil {
+					resCh <- getResponse{
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
+				}
+				// Send back the objectInfo.
+				resCh <- getResponse{
+					objectInfo: objectInfo,
+				}
 			} else {
 				// Offset changes fetch the new object at an Offset.
 				// Because the httpReader may not be set by the first
@@ -132,7 +147,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 					// Range is set with respect to the offset and length of the buffer requested.
 					httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
 				} else {
-					httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
+					httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
 				}
 				if err != nil {
 					resCh <- getResponse{
@@ -155,6 +170,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
 					Size:       int(size),
 					Error:      err,
 					didRead:    true,
+					objectInfo: objectInfo,
 				}
 			}
 		}
@@ -175,6 +191,7 @@ type getRequest struct {
 	isReadAt   bool // Determines if this request is a request to a specific range
 	isReadOp   bool // Determines if this request is a Read or Read/At request.
 	isFirstReq bool // Determines if this request is the first time an object is being accessed.
+	settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
 }
 
 // get response message container to reply back for the request.
@@ -195,10 +212,12 @@ type Object struct {
 	reqCh      chan<- getRequest
 	resCh      <-chan getResponse
 	doneCh     chan<- struct{}
-	prevOffset int64
 	currOffset int64
 	objectInfo ObjectInfo
 
+	// Ask lower level to initiate data fetching based on currOffset
+	seekData bool
+
 	// Keeps track of closed call.
 	isClosed bool
 
@@ -210,6 +229,9 @@ type Object struct {
 
 	// Keeps track of if this object has been read yet.
 	beenRead bool
+
+	// Keeps track of if objectInfo has been set yet.
+	objectInfoSet bool
 }
 
 // doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
@@ -221,11 +243,15 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
 	response := <-o.resCh
 	// This was the first request.
 	if !o.isStarted {
-		// Set objectInfo for first time.
-		o.objectInfo = response.objectInfo
 		// The object has been operated on.
 		o.isStarted = true
 	}
+	// Set the objectInfo if the request was not readAt
+	// and it hasn't been set before.
+	if !o.objectInfoSet && !request.isReadAt {
+		o.objectInfo = response.objectInfo
+		o.objectInfoSet = true
+	}
 	// Set beenRead only if it has not been set before.
 	if !o.beenRead {
 		o.beenRead = response.didRead
@@ -235,6 +261,9 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
 		return response, response.Error
 	}
 
+	// Data are ready on the wire, no need to reinitiate connection in lower level
+	o.seekData = false
+
 	return response, nil
 }
 
@@ -243,8 +272,6 @@
 func (o *Object) setOffset(bytesRead int64) error {
 	// Update the currentOffset.
 	o.currOffset += bytesRead
-	// Save the current offset as previous offset.
-	o.prevOffset = o.currOffset
 
 	if o.currOffset >= o.objectInfo.Size {
 		return io.EOF
@@ -252,7 +279,7 @@ func (o *Object) setOffset(bytesRead int64) error {
 	return nil
 }
 
-// Read reads up to len(p) bytes into p. It returns the number of
+// Read reads up to len(b) bytes into b. It returns the number of
 // bytes read (0 <= n <= len(p)) and any error encountered. Returns
 // io.EOF upon end of file.
 func (o *Object) Read(b []byte) (n int, err error) {
@@ -280,27 +307,14 @@ func (o *Object) Read(b []byte) (n int, err error) {
 		readReq.isFirstReq = true
 	}
 
-	// Verify if offset has changed and currOffset is greater than
-	// previous offset. Perhaps due to Seek().
-	offsetChange := o.prevOffset - o.currOffset
-	if offsetChange < 0 {
-		offsetChange = -offsetChange
-	}
-	if offsetChange > 0 {
-		// Fetch the new reader at the current offset again.
-		readReq.Offset = o.currOffset
-		readReq.DidOffsetChange = true
-	} else {
-		// No offset changes no need to fetch new reader, continue
-		// reading.
-		readReq.DidOffsetChange = false
-		readReq.Offset = 0
-	}
+	// Ask to establish a new data fetch routine based on seekData flag
+	readReq.DidOffsetChange = o.seekData
+	readReq.Offset = o.currOffset
 
 	// Send and receive from the first request.
 	response, err := o.doGetRequest(readReq)
-	if err != nil {
-		// Save the error.
+	if err != nil && err != io.EOF {
+		// Save the error for future calls.
 		o.prevErr = err
 		return response.Size, err
 	}
@@ -309,14 +323,18 @@ func (o *Object) Read(b []byte) (n int, err error) {
 	bytesRead := int64(response.Size)
 
 	// Set the new offset.
-	err = o.setOffset(bytesRead)
-	if err != nil {
-		return response.Size, err
+	oerr := o.setOffset(bytesRead)
+	if oerr != nil {
+		// Save the error for future calls.
+		o.prevErr = oerr
+		return response.Size, oerr
 	}
-	return response.Size, nil
+
+	// Return the response.
+	return response.Size, err
 }
 
-// Stat returns the ObjectInfo structure describing object.
+// Stat returns the ObjectInfo structure describing Object.
 func (o *Object) Stat() (ObjectInfo, error) {
 	if o == nil {
 		return ObjectInfo{}, ErrInvalidArgument("Object is nil")
@@ -325,16 +343,15 @@ func (o *Object) Stat() (ObjectInfo, error) {
 	o.mutex.Lock()
 	defer o.mutex.Unlock()
 
-	if o.prevErr != nil || o.isClosed {
+	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
 		return ObjectInfo{}, o.prevErr
 	}
 
 	// This is the first request.
-	if !o.isStarted {
+	if !o.isStarted || !o.objectInfoSet {
 		statReq := getRequest{
-			isReadOp:   false, // This is a Stat not a Read/ReadAt.
-			Offset:     0,
-			isFirstReq: true,
+			isFirstReq:        !o.isStarted,
+			settingObjectInfo: !o.objectInfoSet,
 		}
 
 		// Send the request and get the response.
@@ -365,8 +382,9 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
 	if o.prevErr != nil || o.isClosed {
 		return 0, o.prevErr
 	}
 
 	// Can only compare offsets to size when size has been set.
-	if o.isStarted {
+	if o.objectInfoSet {
 		// If offset is negative than we return io.EOF.
 		// If offset is greater than or equal to object size we return io.EOF.
 		if offset >= o.objectInfo.Size || offset < 0 {
@@ -383,6 +401,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
 		Offset: offset, // Set the offset.
 		Buffer: b,
 	}
 
 	// Alert that this is the first request.
 	if !o.isStarted {
 		readAtReq.isFirstReq = true
@@ -390,21 +409,29 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
 
 	// Send and receive from the first request.
 	response, err := o.doGetRequest(readAtReq)
-	if err != nil {
+	if err != nil && err != io.EOF {
 		// Save the error.
 		o.prevErr = err
-		return 0, err
+		return response.Size, err
 	}
 	// Bytes read.
 	bytesRead := int64(response.Size)
-	// Update the offsets.
-	err = o.setOffset(bytesRead)
-	if err != nil {
-		return response.Size, err
+	// There is no valid objectInfo yet
+	// to compare against for EOF.
+	if !o.objectInfoSet {
+		// Update the currentOffset.
+		o.currOffset += bytesRead
+	} else {
+		// If this was not the first request update
+		// the offsets and compare against objectInfo
+		// for EOF.
+		oerr := o.setOffset(bytesRead)
+		if oerr != nil {
+			o.prevErr = oerr
+			return response.Size, oerr
+		}
 	}
-	return response.Size, nil
+	return response.Size, err
 }
 
 // Seek sets the offset for the next Read or Write to offset,
@@ -439,7 +466,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 
 	// This is the first request. So before anything else
 	// get the ObjectInfo.
-	if !o.isStarted {
+	if !o.isStarted || !o.objectInfoSet {
 		// Create the new Seek request.
 		seekReq := getRequest{
 			isReadOp: false,
@@ -454,8 +481,6 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 			return 0, err
 		}
 	}
-	// Save current offset as previous offset.
-	o.prevOffset = o.currOffset
 
 	// Switch through whence.
 	switch whence {
@@ -489,6 +514,10 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
 	if o.prevErr == io.EOF {
 		o.prevErr = nil
 	}
 
+	// Ask lower level to fetch again from source
+	o.seekData = true
+
 	// Return the effective offset.
 	return o.currOffset, nil
 }
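The net effect of the `prevOffset` removal: `Seek` only flips `seekData`, and the next `Read` asks the fetch goroutine for a fresh ranged GET. A sketch of the calling pattern this serves, assuming a configured client; bucket and object names are placeholders:

```go
package main

import (
	"io"
	"log"

	"github.com/minio/minio-go"
)

// readFromOffset seeks into an object and reads; after Seek, the next Read
// triggers a new data fetch because seekData was set.
func readFromOffset(client *minio.Client) {
	obj, err := client.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// whence 0 seeks relative to the start of the object.
	if _, err = obj.Seek(1024, 0); err != nil {
		log.Fatalln(err)
	}
	buf := make([]byte, 512)
	n, err := obj.Read(buf)
	if err != nil && err != io.EOF {
		log.Fatalln(err)
	}
	log.Println("read", n, "bytes at offset 1024")
}
```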
vendor/src/github.com/minio/minio-go/api-get-policy.go (vendored)

@@ -41,7 +41,23 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
 	return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
 }
 
-// Request server for policy.
+// ListBucketPolicies - list all policies for a given prefix and all its children.
+func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
+	// Input validation.
+	if err := isValidBucketName(bucketName); err != nil {
+		return map[string]policy.BucketPolicy{}, err
+	}
+	if err := isValidObjectPrefix(objectPrefix); err != nil {
+		return map[string]policy.BucketPolicy{}, err
+	}
+	policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+	if err != nil {
+		return map[string]policy.BucketPolicy{}, err
+	}
+	return policy.GetPolicies(policyInfo.Statements, bucketName), nil
+}
+
+// Request server for current bucket policy.
 func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
 	// Get resources properly escaped and lined up before
 	// using them in http request.
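Usage of the new API follows directly from the signature above. A sketch, assuming a configured client; an empty prefix lists policies for the whole bucket:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// listPolicies walks the per-prefix policies returned by the new
// ListBucketPolicies call; each value is a policy.BucketPolicy constant.
func listPolicies(client *minio.Client) {
	policies, err := client.ListBucketPolicies("mybucket", "")
	if err != nil {
		log.Fatalln(err)
	}
	for prefix, p := range policies {
		log.Printf("%s => %s", prefix, p)
	}
}
```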
vendor/src/github.com/minio/minio-go/api-list.go (vendored): 12 changes

@@ -84,6 +84,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 		// If recursive we do not delimit.
 		delimiter = ""
 	}
+	// Return object owner information by default
+	fetchOwner := true
 	// Validate bucket name.
 	if err := isValidBucketName(bucketName); err != nil {
 		defer close(objectStatCh)
@@ -108,7 +110,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 		var continuationToken string
 		for {
 			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter, 1000)
+			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
 			if err != nil {
 				objectStatCh <- ObjectInfo{
 					Err: err,
@@ -166,7 +168,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
 // ?delimiter - A delimiter is a character you use to group keys.
 // ?prefix - Limits the response to keys that begin with the specified prefix.
 // ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter string, maxkeys int) (listBucketV2Result, error) {
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) {
 	// Validate bucket name.
 	if err := isValidBucketName(bucketName); err != nil {
 		return listBucketV2Result{}, err
@@ -195,6 +197,11 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken,
 		urlValues.Set("delimiter", delimiter)
 	}
 
+	// Fetch owner when listing
+	if fetchOwner {
+		urlValues.Set("fetch-owner", "true")
+	}
+
 	// maxkeys should default to 1000 or less.
 	if maxkeys == 0 || maxkeys > 1000 {
 		maxkeys = 1000
@@ -475,6 +482,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 				objectMultipartStatCh <- ObjectMultipartInfo{
 					Err: err,
 				}
+				continue
 			}
 		}
 		select {
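With `fetchOwner` forced on, V2 listings now populate the `Owner` field added to `ObjectInfo` earlier in this commit. A sketch, assuming a configured client; bucket and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// listWithOwner lists objects recursively; since fetch-owner=true is always
// sent, each ObjectInfo also carries the owner's display name.
func listWithOwner(client *minio.Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)

	for object := range client.ListObjectsV2("mybucket", "photos/", true, doneCh) {
		if object.Err != nil {
			log.Fatalln(object.Err)
		}
		log.Println(object.Key, object.Owner.DisplayName)
	}
}
```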
vendor/src/github.com/minio/minio-go/api-notification.go (vendored)

@@ -22,6 +22,9 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // GetBucketNotification - get bucket notification at a given path.
@@ -120,7 +123,7 @@ type NotificationInfo struct {
 }
 
 // ListenBucketNotification - listen on bucket notifications.
-func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, doneCh <-chan struct{}) <-chan NotificationInfo {
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
 	notificationInfoCh := make(chan NotificationInfo, 1)
 	// Only success, start a routine to start reading line by line.
 	go func(notificationInfoCh chan<- NotificationInfo) {
@@ -135,7 +138,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
 		}
 
 		// Check ARN partition to verify if listening bucket is supported
-		if accountArn.Partition != "minio" {
+		if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
 			notificationInfoCh <- NotificationInfo{
 				Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
 			}
@@ -143,9 +146,18 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
 		}
 
 		// Continously run and listen on bucket notification.
-		for {
+		// Create a done channel to control 'ListObjects' go routine.
+		retryDoneCh := make(chan struct{}, 1)
+
+		// Indicate to our routine to exit cleanly upon return.
+		defer close(retryDoneCh)
+
+		// Wait on the jitter retry loop.
+		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
 			urlValues := make(url.Values)
-			urlValues.Set("notificationARN", accountArn.String())
+			urlValues.Set("prefix", prefix)
+			urlValues.Set("suffix", suffix)
+			urlValues["events"] = events
 
 			// Execute GET on bucket to list objects.
 			resp, err := c.executeMethod("GET", requestMetadata{
@@ -153,10 +165,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
 				queryValues: urlValues,
 			})
 			if err != nil {
-				notificationInfoCh <- NotificationInfo{
-					Err: err,
-				}
-				return
+				continue
 			}
 
 			// Validate http response, upon error return quickly.
@@ -178,10 +187,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
 			for bio.Scan() {
 				var notificationInfo NotificationInfo
 				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
-					notificationInfoCh <- NotificationInfo{
-						Err: err,
-					}
-					return
+					continue
 				}
 				// Send notifications on channel only if there are events received.
 				if len(notificationInfo.Records) > 0 {
@@ -198,12 +204,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
 				// and re-connect.
 				if err == io.ErrUnexpectedEOF {
 					resp.Body.Close()
-					continue
 				}
-				notificationInfoCh <- NotificationInfo{
-					Err: err,
-				}
-				return
 			}
 		}
 	}(notificationInfoCh)
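The listener is now addressed by prefix, suffix and event names instead of an ARN, and reconnects ride the jittered retry loop instead of surfacing transient errors. A sketch of the new call style, assuming a configured client; the record fields follow the standard S3 event layout:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

// watchBucket uses the new prefix/suffix/events signature; the Arn argument
// is gone, and transient connection failures are retried internally.
func watchBucket(client *minio.Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)

	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range client.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			log.Printf("%s on %s/%s", record.EventName,
				record.S3.Bucket.Name, record.S3.Object.Key)
		}
	}
}
```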
vendor/src/github.com/minio/minio-go/api-presigned.go (vendored)

@@ -20,6 +20,9 @@ import (
 	"errors"
 	"net/url"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // supportedGetReqParams - supported request parameters for GET presigned request.
@@ -126,14 +129,14 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
 	policyBase64 := p.base64()
 	p.formData["policy"] = policyBase64
 	// For Google endpoint set this value to be 'GoogleAccessId'.
-	if isGoogleEndpoint(c.endpointURL) {
+	if s3utils.IsGoogleEndpoint(c.endpointURL) {
 		p.formData["GoogleAccessId"] = c.accessKeyID
 	} else {
 		// For all other endpoints set this value to be 'AWSAccessKeyId'.
 		p.formData["AWSAccessKeyId"] = c.accessKeyID
 	}
 	// Sign the policy.
-	p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
+	p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
 	return u, p.formData, nil
 }
 
@@ -156,7 +159,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
 	}
 
 	// Add a credential policy.
-	credential := getCredential(c.accessKeyID, location, t)
+	credential := s3signer.GetCredential(c.accessKeyID, location, t)
 	if err = p.addNewPolicy(policyCondition{
 		matchType: "eq",
 		condition: "$x-amz-credential",
@@ -172,6 +175,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
 	p.formData["x-amz-algorithm"] = signV4Algorithm
 	p.formData["x-amz-credential"] = credential
 	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
-	p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+	p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
 	return u, p.formData, nil
 }
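The signing helpers moved into `pkg/s3signer`, but the public entry point is unchanged. A post-policy sketch, assuming a configured client and this version's `NewPostPolicy`/`SetBucket`/`SetKey`/`SetExpires` helpers:

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

// presignedPost builds a browser-upload policy; signing now routes through
// the extracted s3signer package internally.
func presignedPost(client *minio.Client) {
	p := minio.NewPostPolicy()
	p.SetBucket("mybucket")
	p.SetKey("myobject")
	p.SetExpires(time.Now().UTC().Add(10 * time.Minute))

	url, formData, err := client.PresignedPostPolicy(p)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("POST to:", url, "with fields:", formData)
}
```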
vendor/src/github.com/minio/minio-go/api-put-bucket.go (vendored)

@@ -26,8 +26,10 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/url"
+	"path"
 
 	"github.com/minio/minio-go/pkg/policy"
+	"github.com/minio/minio-go/pkg/s3signer"
 )
 
 /// Bucket operations
@@ -89,11 +91,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
 	// is the preferred method here. The final location of the
 	// 'bucket' is provided through XML LocationConstraint data with
 	// the request.
-	targetURL, err := url.Parse(c.endpointURL)
-	if err != nil {
-		return nil, err
-	}
-	targetURL.Path = "/" + bucketName + "/"
+	targetURL := c.endpointURL
+	targetURL.Path = path.Join(bucketName, "") + "/"
 
 	// get a new HTTP request for the method.
 	req, err := http.NewRequest("PUT", targetURL.String(), nil)
@@ -133,9 +132,9 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
 	if c.signature.isV4() {
 		// Signature calculated for MakeBucket request should be for 'us-east-1',
 		// regardless of the bucket's location constraint.
-		req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+		req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
 	} else if c.signature.isV2() {
-		req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+		req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
 	}
 
 	// Return signed request.
vendor/src/github.com/minio/minio-go/api-put-bucket_test.go (vendored)

@@ -24,8 +24,10 @@ import (
 	"io"
 	"io/ioutil"
 	"net/http"
-	"net/url"
+	"path"
 	"testing"
+
+	"github.com/minio/minio-go/pkg/s3signer"
 )
 
 // Tests validate http request formulated for creation of bucket.
@@ -33,14 +35,11 @@ func TestMakeBucketRequest(t *testing.T) {
 	// Generates expected http request for bucket creation.
 	// Used for asserting with the actual request generated.
 	createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
-		targetURL, err := url.Parse(c.endpointURL)
-		if err != nil {
-			return nil, err
-		}
-		targetURL.Path = "/" + bucketName + "/"
+		targetURL := c.endpointURL
+		targetURL.Path = path.Join(bucketName, "") + "/"
 
 		// get a new HTTP request for the method.
+		var err error
 		req, err = http.NewRequest("PUT", targetURL.String(), nil)
 		if err != nil {
 			return nil, err
@@ -78,9 +77,9 @@ func TestMakeBucketRequest(t *testing.T) {
 		if c.signature.isV4() {
 			// Signature calculated for MakeBucket request should be for 'us-east-1',
 			// regardless of the bucket's location constraint.
-			req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+			req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
 		} else if c.signature.isV2() {
-			req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+			req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
 		}
 
 		// Return signed request.
@ -44,18 +44,17 @@ func isReadAt(reader io.Reader) (ok bool) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldUploadPart - verify if part should be uploaded.
|
// shouldUploadPart - verify if part should be uploaded.
|
||||||
func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
|
func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
|
||||||
// If part not found should upload the part.
|
// If part not found should upload the part.
|
||||||
uploadedPart, found := objectParts[objPart.PartNumber]
|
if uploadReq.Part == nil {
|
||||||
if !found {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// if size mismatches should upload the part.
|
// if size mismatches should upload the part.
|
||||||
if objPart.Size != uploadedPart.Size {
|
if objPart.Size != uploadReq.Part.Size {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// if md5sum mismatches should upload the part.
|
// if md5sum mismatches should upload the part.
|
||||||
if objPart.ETag != uploadedPart.ETag {
|
if objPart.ETag != uploadReq.Part.ETag {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
|
@ -68,7 +67,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
|
||||||
// object storage it will have the following parameters as constants.
|
// object storage it will have the following parameters as constants.
|
||||||
//
|
//
|
||||||
// maxPartsCount - 10000
|
// maxPartsCount - 10000
|
||||||
// minPartSize - 5MiB
|
// minPartSize - 64MiB
|
||||||
// maxMultipartPutObjectSize - 5TiB
|
// maxMultipartPutObjectSize - 5TiB
|
||||||
//
|
//
|
||||||
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
|
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
|
||||||
|
@ -167,37 +166,64 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
|
||||||
|
|
||||||
 // getUploadID - fetch upload id if already present for an object name
 // or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
-		return "", false, err
+		return "", err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return "", false, err
+		return "", err
 	}
-
-	// Set content Type to default if empty string.
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	// Find upload id for previous upload for an object.
-	uploadID, err = c.findUploadID(bucketName, objectName)
-	if err != nil {
-		return "", false, err
-	}
-	if uploadID == "" {
-		// Initiate multipart upload for an object.
-		initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
-		if err != nil {
-			return "", false, err
-		}
-		// Save the new upload id.
-		uploadID = initMultipartUploadResult.UploadID
-		// Indicate that this is a new upload id.
-		isNew = true
+	// Initiate multipart upload for an object.
+	initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+	if err != nil {
+		return "", err
 	}
-	return uploadID, isNew, nil
+	return initMultipartUploadResult.UploadID, nil
+}
+
+// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session,
+// or initiates a new multipart session if no current one is found.
+func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
+	// A map of all uploaded parts.
+	var partsInfo map[int]objectPart
+	var err error
+
+	uploadID, err := c.findUploadID(bucketName, objectName)
+	if err != nil {
+		return "", nil, err
+	}
+
+	if uploadID == "" {
+		// Initiate a new multipart request.
+		uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+		if err != nil {
+			return "", nil, err
+		}
+	} else {
+		// Fetch previously uploaded parts and maximum part size.
+		partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
+		if err != nil {
+			// When the server returns NoSuchUpload even though it previously acknowledged
+			// the existence of the upload id, initiate a new multipart upload.
+			if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
+				uploadID, err = c.newUploadID(bucketName, objectName, metaData)
+				if err != nil {
+					return "", nil, err
+				}
+			} else {
+				return "", nil, err
+			}
+		}
+	}
+
+	// Allocate partsInfo if not done yet.
+	if partsInfo == nil {
+		partsInfo = make(map[int]objectPart)
+	}
+
+	return uploadID, partsInfo, nil
 }

 // computeHash - Calculates hashes for an input read Seeker.
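The new session logic above splits cleanly into "mint a new upload id" and "resume if possible". As a minimal, hypothetical sketch of that decision (the `part` type, `resumeOrStart`, and the callbacks are illustrative stand-ins, not the SDK's API):

```go
package main

import (
	"errors"
	"fmt"
)

// part is a hypothetical stand-in for the SDK's internal objectPart.
type part struct {
	Number int
	Size   int64
}

var errNoSuchUpload = errors.New("NoSuchUpload") // illustrative sentinel

// resumeOrStart mirrors the decision getMpartUploadSession makes: reuse a
// previously found upload id together with its listed parts, fall back to a
// brand-new session when the id is absent or stale, and surface any other
// listing error unchanged.
func resumeOrStart(foundID string,
	listParts func(string) (map[int]part, error),
	newSession func() (string, error)) (string, map[int]part, error) {

	if foundID == "" {
		id, err := newSession()
		return id, map[int]part{}, err
	}
	parts, err := listParts(foundID)
	if errors.Is(err, errNoSuchUpload) {
		// The server no longer knows the upload id: start over.
		id, nerr := newSession()
		return id, map[int]part{}, nerr
	}
	if err != nil {
		return "", nil, err
	}
	return foundID, parts, nil
}

func main() {
	id, parts, err := resumeOrStart("abc123",
		func(string) (map[int]part, error) {
			return map[int]part{1: {Number: 1, Size: 5 << 20}}, nil
		},
		func() (string, error) { return "fresh-id", nil })
	fmt.Println(id, len(parts), err) // abc123 1 <nil>
}
```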
@@ -16,7 +16,11 @@

 package minio

-import "net/http"
+import (
+	"net/http"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)

 // CopyObject - copy a source object into a new object with the provided name in the provided bucket
 func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
@@ -38,7 +42,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
 	}

 	// Set copy source.
-	customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
+	customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))

 	// Execute PUT on objectName.
 	resp, err := c.executeMethod("PUT", requestMetadata{
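The copy hunk's only functional change is routing the `x-amz-copy-source` header through the shared `s3utils.EncodePath` helper. A rough approximation of what such an encoder must guarantee, assuming per-segment percent-encoding with literal `/` separators (the real helper's rules may differ):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// encodeCopySource approximates what an S3 path encoder must do for the
// x-amz-copy-source header: encode each path segment while keeping the
// "/" separators literal. This is an illustrative approximation, not the
// exact rule set of s3utils.EncodePath.
func encodeCopySource(src string) string {
	segments := strings.Split(src, "/")
	for i, s := range segments {
		segments[i] = url.PathEscape(s)
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(encodeCopySource("my-bucket/reports/2017 Q1 ünicode.csv"))
	// my-bucket/reports/2017%20Q1%20%C3%BCnicode.csv
}
```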
@@ -28,6 +28,8 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )

 // FPutObject - Create an object in a bucket, with contents from file at filePath.
@@ -62,6 +64,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 		return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
 	}

+	objMetadata := make(map[string][]string)
+
 	// Set contentType based on filepath extension if not given or default
 	// value of "binary/octet-stream" if the extension has no associated type.
 	if contentType == "" {
@@ -70,9 +74,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 		}
 	}

+	objMetadata["Content-Type"] = []string{contentType}
+
 	// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
 	// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
-	if isGoogleEndpoint(c.endpointURL) {
+	if s3utils.IsGoogleEndpoint(c.endpointURL) {
 		if fileSize > int64(maxSinglePutObjectSize) {
 			return 0, ErrorResponse{
 				Code: "NotImplemented",
@@ -82,11 +88,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 			}
 		}
 		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}

 	// NOTE: S3 doesn't allow anonymous multipart requests.
-	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+	if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
 		if fileSize > int64(maxSinglePutObjectSize) {
 			return 0, ErrorResponse{
 				Code: "NotImplemented",
@@ -97,15 +103,15 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 		}
 		// Do not compute MD5 for anonymous requests to Amazon
 		// S3. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
+		return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}

 	// Small object upload is initiated for uploads for input data size smaller than 5MiB.
 	if fileSize < minPartSize && fileSize >= 0 {
-		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}
 	// Upload all large objects as multipart.
-	n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
+	n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	if err != nil {
 		errResp := ToErrorResponse(err)
 		// Verify if multipart functionality is not available, if not
@@ -116,7 +122,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 			return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
 		}
 		// Fall back to uploading as single PutObject operation.
-		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
+		return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
 	}
 	return n, err
 }
@@ -131,7 +137,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
 // against MD5SUM of each individual parts. This function also
 // effectively utilizes file system capabilities of reading from
 // specific sections and not having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
+func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -140,9 +146,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 		return 0, err
 	}

-	// Get upload id for an object, initiates a new multipart request
-	// if it cannot find any previously partially uploaded object.
-	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+	// Get the upload id of a previously partially uploaded object or initiate a new multipart upload.
+	uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
 	if err != nil {
 		return 0, err
 	}
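`FPutObject` now threads `objMetadata` through the same size-based dispatch as before: one PUT for small files, multipart otherwise. A sketch of that dispatch, with the threshold value assumed (5MiB in older releases, 64MiB per the updated comment further down):

```go
package main

import "fmt"

// Assumed threshold; the SDK defines its own minPartSize constant.
const minPartSize = 64 << 20

// chooseStrategy mirrors FPutObject's dispatch on file size: small,
// known-size inputs go in a single PUT, everything else is multipart.
func chooseStrategy(size int64) string {
	if size >= 0 && size < minPartSize {
		return "single PUT"
	}
	return "multipart"
}

func main() {
	fmt.Println(chooseStrategy(10 << 20)) // single PUT
	fmt.Println(chooseStrategy(1 << 30))  // multipart
}
```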
@@ -151,34 +156,41 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 	var totalUploadedSize int64

 	// Complete multipart upload.
-	var completeMultipartUpload completeMultipartUpload
-
-	// A map of all uploaded parts.
-	var partsInfo = make(map[int]objectPart)
-
-	// If this session is a continuation of a previous session fetch all
-	// previously uploaded parts info.
-	if !isNew {
-		// Fetch previously uploaded parts and maximum part size.
-		partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
-		if err != nil {
-			return 0, err
-		}
-	}
+	var complMultipartUpload completeMultipartUpload

 	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, _, err := optimalPartInfo(fileSize)
+	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
 	if err != nil {
 		return 0, err
 	}

-	// Part number always starts with '1'.
-	partNumber := 1
-
-	for partNumber <= totalPartsCount {
-		// Get a section reader on a particular offset.
-		sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
+	// Create a channel to communicate a part was uploaded.
+	// Buffer this to 10000, the maximum number of parts allowed by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Create a channel to communicate which part to upload.
+	// Buffer this to 10000, the maximum number of parts allowed by S3.
+	uploadPartsCh := make(chan uploadPartReq, 10000)
+
+	// Just for readability.
+	lastPartNumber := totalPartsCount
+
+	// Send each part through uploadPartsCh to be uploaded.
+	for p := 1; p <= totalPartsCount; p++ {
+		part, ok := partsInfo[p]
+		if ok {
+			uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+		} else {
+			uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+		}
+	}
+	close(uploadPartsCh)
+
+	// Use three 'workers' to upload parts in parallel.
+	for w := 1; w <= 3; w++ {
+		go func() {
+			// Deal with each part as it comes through the channel.
+			for uploadReq := range uploadPartsCh {
 				// Add hash algorithms that need to be calculated by computeHash()
 				// In case of a non-v4 signature or https connection, sha256 is not needed.
 				hashAlgos := make(map[string]hash.Hash)
@@ -188,46 +200,95 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 					hashAlgos["sha256"] = sha256.New()
 				}

+				// If partNumber was not uploaded we calculate the missing
+				// part offset and size. For all other part numbers we
+				// calculate offset based on multiples of partSize.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+				missingPartSize := partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (fileSize - lastPartSize)
+					missingPartSize = lastPartSize
+				}
+
+				// Get a section reader on a particular offset.
+				sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
 				var prtSize int64
+				var err error
+
 				prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
 				if err != nil {
-					return 0, err
+					uploadedPartsCh <- uploadedPartRes{
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
 				}

-				var reader io.Reader
-				// Update progress reader appropriately to the latest offset
-				// as we read from the source.
-				reader = newHook(sectionReader, progress)
+				// Create the part to be uploaded.
+				verifyObjPart := objectPart{
+					ETag:       hex.EncodeToString(hashSums["md5"]),
+					PartNumber: uploadReq.PartNum,
+					Size:       partSize,
+				}
+
+				// If this is the last part do not give it the full part size.
+				if uploadReq.PartNum == lastPartNumber {
+					verifyObjPart.Size = lastPartSize
+				}

 				// Verify if part should be uploaded.
-				if shouldUploadPart(objectPart{
-					ETag:       hex.EncodeToString(hashSums["md5"]),
-					PartNumber: partNumber,
-					Size:       prtSize,
-				}, partsInfo) {
+				if shouldUploadPart(verifyObjPart, uploadReq) {
 					// Proceed to upload the part.
 					var objPart objectPart
-					objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
-						hashSums["md5"], hashSums["sha256"], prtSize)
+					objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
 					if err != nil {
-						return totalUploadedSize, err
+						uploadedPartsCh <- uploadedPartRes{
+							Error: err,
+						}
+						// Exit the goroutine.
+						return
 					}
 					// Save successfully uploaded part metadata.
-					partsInfo[partNumber] = objPart
-				} else {
-					// Update the progress reader for the skipped part.
-					if progress != nil {
-						if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
-							return totalUploadedSize, err
-						}
-					}
-				}
-
-		// Save successfully uploaded size.
-		totalUploadedSize += prtSize
-
-		// Increment part number.
-		partNumber++
-	}
+					uploadReq.Part = &objPart
+				}
+				// Return the part size through the channel.
+				uploadedPartsCh <- uploadedPartRes{
+					Size:    verifyObjPart.Size,
+					PartNum: uploadReq.PartNum,
+					Part:    uploadReq.Part,
+					Error:   nil,
+				}
+			}
+		}()
+	}
+
+	// Retrieve each uploaded part once it is done.
+	for u := 1; u <= totalPartsCount; u++ {
+		uploadRes := <-uploadedPartsCh
+		if uploadRes.Error != nil {
+			return totalUploadedSize, uploadRes.Error
+		}
+		// Retrieve each uploaded part and store it to be completed.
+		part := uploadRes.Part
+		if part == nil {
+			return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+		}
+		// Update the total uploaded size.
+		totalUploadedSize += uploadRes.Size
+		// Update the progress bar if there is one.
+		if progress != nil {
+			if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+				return totalUploadedSize, err
+			}
+		}
+		// Store the part to be completed.
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}

 	// Verify if we uploaded all data.
@@ -235,22 +296,9 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
 		return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
 	}

-	// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
-	for _, part := range partsInfo {
-		var complPart completePart
-		complPart.ETag = part.ETag
-		complPart.PartNumber = part.PartNumber
-		completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
-	}
-
-	// Verify if totalPartsCount is not equal to total list of parts.
-	if totalPartsCount != len(completeMultipartUpload.Parts) {
-		return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
-	}
-
 	// Sort all completed parts.
-	sort.Sort(completedParts(completeMultipartUpload.Parts))
-	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
 	if err != nil {
 		return totalUploadedSize, err
 	}
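The heart of this hunk is a fan-out/fan-in pipeline: queue every part number on a buffered channel, let three workers drain it, and collect exactly one result per part. Stripped of the S3 details, the shape looks roughly like this (names are illustrative, not the SDK's):

```go
package main

import "fmt"

// req and res are minimal stand-ins for the diff's uploadPartReq and
// uploadedPartRes types.
type req struct{ partNum int }
type res struct {
	partNum int
	err     error
}

func main() {
	const totalParts = 7
	reqCh := make(chan req, totalParts) // the SDK buffers to 10000, S3's part cap
	resCh := make(chan res, totalParts)

	// Fan out: queue every part number, then close so workers can exit.
	for p := 1; p <= totalParts; p++ {
		reqCh <- req{partNum: p}
	}
	close(reqCh)

	// Three workers, as in the diff.
	for w := 0; w < 3; w++ {
		go func() {
			for r := range reqCh {
				// "Upload" the part; a real worker would send the error
				// on resCh and return early when the upload fails.
				resCh <- res{partNum: r.partNum}
			}
		}()
	}

	// Fan in: drain exactly one result per part, aborting on the first error.
	for i := 0; i < totalParts; i++ {
		out := <-resCh
		if out.err != nil {
			fmt.Println("abort:", out.err)
			return
		}
		fmt.Println("completed part", out.partNum)
	}
}
```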
@@ -22,6 +22,7 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/xml"
+	"fmt"
 	"hash"
 	"io"
 	"io/ioutil"
@@ -44,11 +45,11 @@ import (
 // If we exhaust all the known types, code proceeds to use stream as
 // is where each part is re-downloaded, checksummed and verified
 // before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	if size > 0 && size > minPartSize {
 		// Verify if reader is *os.File, then use file system functionalities.
 		if isFile(reader) {
-			return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
+			return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
 		}
 		// Verify if reader is *minio.Object or io.ReaderAt.
 		// NOTE: Verification of object is kept for a specific purpose
@@ -57,17 +58,17 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
 		// and such a functionality is used in the subsequent code
 		// path.
 		if isObject(reader) || isReadAt(reader) {
-			return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+			return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
 		}
 	}
 	// For any other data size and reader type we do generic multipart
 	// approach by staging data in temporary files and uploading them.
-	return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+	return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
 }

-// putObjectStream uploads files bigger than 5MiB, and also supports
+// putObjectStream uploads files bigger than 64MiB, and also supports
 // special case where size is unknown i.e '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -82,26 +83,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 	// Complete multipart upload.
 	var complMultipartUpload completeMultipartUpload

-	// A map of all previously uploaded parts.
-	var partsInfo = make(map[int]objectPart)
-
-	// getUploadID for an object, initiates a new multipart request
-	// if it cannot find any previously partially uploaded object.
-	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+	// Get the upload id of a previously partially uploaded object or initiate a new multipart upload.
+	uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
 	if err != nil {
 		return 0, err
 	}

-	// If this session is a continuation of a previous session fetch all
-	// previously uploaded parts info.
-	if !isNew {
-		// Fetch previously uploaded parts and maximum part size.
-		partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
-		if err != nil {
-			return 0, err
-		}
-	}
-
 	// Calculate the optimal parts info for a given size.
 	totalPartsCount, partSize, _, err := optimalPartInfo(size)
 	if err != nil {
@@ -115,7 +102,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 	tmpBuffer := new(bytes.Buffer)

 	for partNumber <= totalPartsCount {
-
 		// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
 		// with non-v4 signature request or HTTPS connection
 		hashSums := make(map[string][]byte)
@@ -138,12 +124,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		// as we read from the source.
 		reader = newHook(tmpBuffer, progress)

+		part, ok := partsInfo[partNumber]
+
 		// Verify if part should be uploaded.
-		if shouldUploadPart(objectPart{
+		if !ok || shouldUploadPart(objectPart{
 			ETag:       hex.EncodeToString(hashSums["md5"]),
 			PartNumber: partNumber,
 			Size:       prtSize,
-		}, partsInfo) {
+		}, uploadPartReq{PartNum: partNumber, Part: &part}) {
 			// Proceed to upload the part.
 			var objPart objectPart
 			objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
@@ -169,14 +157,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		// Save successfully uploaded size.
 		totalUploadedSize += prtSize

+		// Increment part number.
+		partNumber++
+
 		// For unknown size, Read EOF we break away.
 		// We do not have to upload till totalPartsCount.
 		if size < 0 && rErr == io.EOF {
 			break
 		}
-
-		// Increment part number.
-		partNumber++
 	}

 	// Verify if we uploaded all the data.
@@ -186,19 +174,17 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 		}
 	}

-	// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
-	for _, part := range partsInfo {
-		var complPart completePart
-		complPart.ETag = part.ETag
-		complPart.PartNumber = part.PartNumber
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
-	}
-
-	if size > 0 {
-		// Verify if totalPartsCount is not equal to total list of parts.
-		if totalPartsCount != len(complMultipartUpload.Parts) {
-			return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts))
-		}
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
 	}

 	// Sort all completed parts.
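The stream path now rebuilds the completed-parts list by walking part numbers in order and failing fast on a gap, then sorts before completion. A small self-contained sketch of that bookkeeping (types are stand-ins for the SDK's internals):

```go
package main

import (
	"fmt"
	"sort"
)

type completePart struct {
	PartNumber int
	ETag       string
}

// assemble walks part numbers 1..uploaded, errors on any gap, and sorts
// the result by PartNumber, mirroring the completion step in the diff.
func assemble(parts map[int]completePart, uploaded int) ([]completePart, error) {
	out := make([]completePart, 0, uploaded)
	for i := 1; i <= uploaded; i++ {
		p, ok := parts[i]
		if !ok {
			return nil, fmt.Errorf("missing part number %d", i)
		}
		out = append(out, p)
	}
	// Already in order by construction; the SDK still sorts defensively.
	sort.Slice(out, func(a, b int) bool { return out[a].PartNumber < out[b].PartNumber })
	return out, nil
}

func main() {
	parts := map[int]completePart{
		2: {2, "etag-2"}, 1: {1, "etag-1"}, 3: {3, "etag-3"},
	}
	done, err := assemble(parts, 3)
	fmt.Println(done, err)
}
```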
@@ -213,7 +199,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
 }

 // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return initiateMultipartUploadResult{}, err
@@ -226,13 +212,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
 	urlValues := make(url.Values)
 	urlValues.Set("uploads", "")

-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
 	// Set ContentType header.
 	customHeader := make(http.Header)
-	customHeader.Set("Content-Type", contentType)
+	for k, v := range metaData {
+		if len(v) > 0 {
+			customHeader.Set(k, v[0])
+		}
+	}
+
+	// Set a default content-type header if the latter is not provided.
+	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+		customHeader.Set("Content-Type", "application/octet-stream")
+	}

 	reqMetadata := requestMetadata{
 		bucketName: bucketName,
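Both `initiateMultipartUpload` and `putObjectDo` now derive request headers from the metadata map and default the Content-Type when the caller omits it. The rule in isolation, mirroring the added lines:

```go
package main

import (
	"fmt"
	"net/http"
)

// buildHeaders copies the first value of each metadata key into the
// request headers, then falls back to application/octet-stream when
// no Content-Type was supplied, exactly as the diff does.
func buildHeaders(metaData map[string][]string) http.Header {
	h := make(http.Header)
	for k, v := range metaData {
		if len(v) > 0 {
			h.Set(k, v[0])
		}
	}
	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
		h.Set("Content-Type", "application/octet-stream")
	}
	return h
}

func main() {
	h := buildHeaders(map[string][]string{"X-Amz-Meta-Author": {"alice"}})
	fmt.Println(h.Get("Content-Type"), h.Get("X-Amz-Meta-Author"))
	// application/octet-stream alice
}
```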
@@ -16,10 +16,22 @@

 package minio

-import "io"
+import (
+	"io"
+	"strings"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)

-// PutObjectWithProgress - With progress.
+// PutObjectWithProgress - with progress.
 func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
+	metaData := make(map[string][]string)
+	metaData["Content-Type"] = []string{contentType}
+	return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
+}
+
+// PutObjectWithMetadata - with metadata.
+func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -47,7 +59,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R

 	// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
 	// So we fall back to single PUT operation with the maximum limit of 5GiB.
-	if isGoogleEndpoint(c.endpointURL) {
+	if s3utils.IsGoogleEndpoint(c.endpointURL) {
 		if size <= -1 {
 			return 0, ErrorResponse{
 				Code: "NotImplemented",
@@ -60,11 +72,11 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
 			return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
 		}
 		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
 	}

 	// NOTE: S3 doesn't allow anonymous multipart requests.
-	if isAmazonEndpoint(c.endpointURL) && c.anonymous {
+	if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
 		if size <= -1 {
 			return 0, ErrorResponse{
 				Code: "NotImplemented",
@@ -78,26 +90,26 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
 		}
 		// Do not compute MD5 for anonymous requests to Amazon
 		// S3. Uploads up to 5GiB in size.
-		return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
+		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
 	}

 	// putSmall object.
 	if size < minPartSize && size >= 0 {
-		return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+		return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
 	}
 	// For all sizes greater than 5MiB do multipart.
-	n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
+	n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
 	if err != nil {
 		errResp := ToErrorResponse(err)
 		// Verify if multipart functionality is not available, if not
 		// fall back to single PutObject operation.
-		if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
 			// Verify if size of reader is greater than '5GiB'.
 			if size > maxSinglePutObjectSize {
 				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
 			}
 			// Fall back to uploading as single PutObject operation.
-			return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
+			return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
 		}
 		return n, err
 	}
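A hypothetical caller of the new `PutObjectWithMetadata` entry point might look like this, assuming the v2-era constructor signature and placeholder credentials, bucket, and endpoint:

```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	f, err := os.Open("report.csv")
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	// Content-Type plus a custom X-Amz-Meta-* key, both carried as headers.
	metaData := map[string][]string{
		"Content-Type":      {"text/csv"},
		"X-Amz-Meta-Author": {"alice"},
	}
	n, err := client.PutObjectWithMetadata("mybucket", "report.csv", f, metaData, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
```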
@@ -20,21 +20,34 @@ import (
 	"bytes"
 	"crypto/md5"
 	"crypto/sha256"
+	"fmt"
 	"hash"
 	"io"
 	"io/ioutil"
 	"sort"
 )

+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+	Error   error // Any error encountered while uploading the part.
+	PartNum int   // Number of the part uploaded.
+	Size    int64 // Size of the part uploaded.
+	Part    *objectPart
+}
+
+type uploadPartReq struct {
+	PartNum int         // Number of the part to upload.
+	Part    *objectPart // Previously uploaded part info, nil if none.
+}
+
 // shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
+func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
 	// If part not found part should be uploaded.
-	uploadedPart, found := objectParts[objPart.PartNumber]
-	if !found {
+	if uploadReq.Part == nil {
 		return true
 	}
 	// if size mismatches part should be uploaded.
-	if uploadedPart.Size != objPart.Size {
+	if uploadReq.Part.Size != objPart.Size {
 		return true
 	}
 	return false
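The reworked predicate only consults the single candidate part carried in the request. Its skip rule, re-implemented minimally for illustration:

```go
package main

import "fmt"

type objectPart struct {
	PartNumber int
	Size       int64
}

// shouldUpload re-states the diff's rule: a part is (re-)uploaded when
// the resumed session has no record of it, or the recorded size differs.
func shouldUpload(expected objectPart, previous *objectPart) bool {
	if previous == nil {
		return true // never uploaded
	}
	if previous.Size != expected.Size {
		return true // size mismatch, likely a partial or stale part
	}
	return false // safe to skip
}

func main() {
	fmt.Println(shouldUpload(objectPart{1, 5 << 20}, nil))                     // true
	fmt.Println(shouldUpload(objectPart{2, 5 << 20}, &objectPart{2, 5 << 20})) // false
	fmt.Println(shouldUpload(objectPart{3, 1 << 20}, &objectPart{3, 5 << 20})) // true
}
```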
@@ -50,7 +63,7 @@ func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart)
 // temporary files for staging all the data, these temporary files are
 // cleaned automatically when the caller i.e http client closes the
 // stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -59,9 +72,8 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 		return 0, err
 	}

-	// Get upload id for an object, initiates a new multipart request
-	// if it cannot find any previously partially uploaded object.
-	uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+	// Get the upload id of a previously partially uploaded object or initiate a new multipart upload.
+	uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
 	if err != nil {
 		return 0, err
 	}
@@ -72,74 +84,56 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 	// Complete multipart upload.
 	var complMultipartUpload completeMultipartUpload

-	// A map of all uploaded parts.
-	var partsInfo = make(map[int]objectPart)
-
-	// Fetch all parts info previously uploaded.
-	if !isNew {
-		partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
-		if err != nil {
-			return 0, err
-		}
-	}
-
 	// Calculate the optimal parts info for a given size.
 	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
 	if err != nil {
 		return 0, err
 	}

-	// Used for readability, lastPartNumber is always
-	// totalPartsCount.
+	// Used for readability, lastPartNumber is always totalPartsCount.
 	lastPartNumber := totalPartsCount

-	// partNumber always starts with '1'.
-	partNumber := 1
+	// Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadPartsCh := make(chan uploadPartReq, 10000)

-	// Initialize a temporary buffer.
-	tmpBuffer := new(bytes.Buffer)
+	// Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Send each part number to the channel to be processed.
+	for p := 1; p <= totalPartsCount; p++ {
+		part, ok := partsInfo[p]
+		if ok {
+			uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
+		} else {
+			uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+		}
+	}
+	close(uploadPartsCh)
+
+	// Receive each part number from the channel allowing three parallel uploads.
+	for w := 1; w <= 3; w++ {
+		go func() {
 			// Read defaults to reading at 5MiB buffer.
 			readAtBuffer := make([]byte, optimalReadBufferSize)

-	// Upload all the missing parts.
-	for partNumber <= lastPartNumber {
-		// Verify object if it's uploaded.
-		verifyObjPart := objectPart{
-			PartNumber: partNumber,
-			Size:       partSize,
-		}
-		// Special case if we see a last part number, save last part
-		// size as the proper part size.
-		if partNumber == lastPartNumber {
-			verifyObjPart = objectPart{
-				PartNumber: lastPartNumber,
-				Size:       lastPartSize,
-			}
-		}
-
-		// Verify if part should be uploaded.
-		if !shouldUploadPartReadAt(verifyObjPart, partsInfo) {
-			// Increment part number when not uploaded.
-			partNumber++
-			if progress != nil {
-				// Update the progress reader for the skipped part.
-				if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil {
-					return 0, err
-				}
-			}
-			continue
-		}
+			// Each worker will draw from the part channel and upload in parallel.
+			for uploadReq := range uploadPartsCh {
+				// Declare a new tmpBuffer.
+				tmpBuffer := new(bytes.Buffer)

 				// If partNumber was not uploaded we calculate the missing
 				// part offset and size. For all other part numbers we
 				// calculate offset based on multiples of partSize.
-		readOffset := int64(partNumber-1) * partSize
+				readOffset := int64(uploadReq.PartNum-1) * partSize
 				missingPartSize := partSize

 				// As a special case if partNumber is lastPartNumber, we
 				// calculate the offset based on the last part size.
-		if partNumber == lastPartNumber {
+				if uploadReq.PartNum == lastPartNumber {
 					readOffset = (size - lastPartSize)
 					missingPartSize = lastPartSize
 				}
@@ -157,42 +151,83 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 				}

 				var prtSize int64
+				var err error
 				prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
 				if err != nil {
-			return 0, err
+					// Send the error back through the channel.
+					uploadedPartsCh <- uploadedPartRes{
+						Size:  0,
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
 				}

-		var reader io.Reader
-		// Update progress reader appropriately to the latest offset
-		// as we read from the source.
-		reader = newHook(tmpBuffer, progress)
+				// Verify object if it's uploaded.
+				verifyObjPart := objectPart{
+					PartNumber: uploadReq.PartNum,
+					Size:       partSize,
+				}
+				// Special case if we see a last part number, save last part
+				// size as the proper part size.
+				if uploadReq.PartNum == lastPartNumber {
+					verifyObjPart.Size = lastPartSize
+				}
+
+				// Only upload the necessary parts. Otherwise return size through channel
+				// to update any progress bar.
+				if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
 					// Proceed to upload the part.
 					var objPart objectPart
-		objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+					objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
 					if err != nil {
-			// Reset the buffer upon any error.
-			tmpBuffer.Reset()
-			return 0, err
+						uploadedPartsCh <- uploadedPartRes{
+							Size:  0,
+							Error: err,
+						}
+						// Exit the goroutine.
+						return
 					}
 					// Save successfully uploaded part metadata.
-		partsInfo[partNumber] = objPart
+					uploadReq.Part = &objPart
+				}
+				// Send successful part info through the channel.
+				uploadedPartsCh <- uploadedPartRes{
+					Size:    verifyObjPart.Size,
+					PartNum: uploadReq.PartNum,
+					Part:    uploadReq.Part,
+					Error:   nil,
+				}
+			}
+		}()
+	}

-		// Increment part number here after successful part upload.
-		partNumber++
-
-		// Reset the buffer.
-		tmpBuffer.Reset()
-	}
-
-	// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
-	for _, part := range partsInfo {
-		var complPart completePart
-		complPart.ETag = part.ETag
-		complPart.PartNumber = part.PartNumber
-		totalUploadedSize += part.Size
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
-	}
+	// Gather the responses as they occur and update any
+	// progress bar.
+	for u := 1; u <= totalPartsCount; u++ {
+		uploadRes := <-uploadedPartsCh
+		if uploadRes.Error != nil {
+			return totalUploadedSize, uploadRes.Error
+		}
+		// Retrieve each uploaded part and store it to be completed.
+		// part, ok := partsInfo[uploadRes.PartNum]
+		part := uploadRes.Part
+		if part == nil {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+		}
+		// Update the totalUploadedSize.
+		totalUploadedSize += uploadRes.Size
+		// Update the progress bar if there is one.
+		if progress != nil {
+			if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+				return totalUploadedSize, err
+			}
+		}
+		// Store the parts to be completed in order.
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+			ETag:       part.ETag,
+			PartNumber: part.PartNumber,
+		})
+	}

 	// Verify if we uploaded all the data.
@@ -200,11 +235,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
 		return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
 	}

-	// Verify if totalPartsCount is not equal to total list of parts.
-	if totalPartsCount != len(complMultipartUpload.Parts) {
-		return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts))
-	}
-
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))
 	_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
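Each worker recomputes its own read window instead of tracking a shared offset. A worked example of that arithmetic with assumed sizes:

```go
package main

import "fmt"

func main() {
	const (
		size     = int64(130 << 20) // 130 MiB object (example value)
		partSize = int64(64 << 20)  // 64 MiB parts (example value)
	)
	totalParts := int((size + partSize - 1) / partSize) // 3
	lastPartSize := size - int64(totalParts-1)*partSize // 2 MiB

	for p := 1; p <= totalParts; p++ {
		// Part p reads at (p-1)*partSize, except the final part, which
		// reads the remainder at size-lastPartSize, as in the diff.
		offset := int64(p-1) * partSize
		length := partSize
		if p == totalParts {
			offset = size - lastPartSize
			length = lastPartSize
		}
		fmt.Printf("part %d: offset=%d length=%d\n", p, offset, length)
	}
}
```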
@@ -103,11 +103,10 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
 			// implement Seekable calls. Ignore them and treat
 			// them like a stream with unknown length.
 			switch st.Name() {
-			case "stdin":
-				fallthrough
-			case "stdout":
-				fallthrough
-			case "stderr":
+			case "stdin", "stdout", "stderr":
+				return
+			// Ignore read/write stream of os.Pipe() which have unknown length too.
+			case "|0", "|1":
 				return
 			}
 			size = st.Size()
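The `getReaderSize` tweak relies on the base name reported by `Stat()` for special files. A small probe (output is platform-dependent, and the `|0`/`|1` names come from `os.Pipe` ends, as the added comment notes):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.Stdin is created as NewFile(0, "/dev/stdin") on Unix, so its
	// FileInfo.Name() reports "stdin", which is exactly what the SDK's
	// switch statement matches on.
	st, err := os.Stdin.Stat()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("name=%q mode=%v size=%d\n", st.Name(), st.Mode(), st.Size())
}
```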
@@ -151,7 +150,7 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte

 // putObjectNoChecksum special function used Google Cloud Storage. This special function
 // is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -169,7 +168,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea

 	// This function does not calculate sha256 and md5sum for payload.
 	// Execute put object.
-	st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
+	st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
 	if err != nil {
 		return 0, err
 	}
@@ -181,7 +180,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea

 // putObjectSingle is a special function for uploading single put object request.
 // This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return 0, err
@@ -221,6 +220,9 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 		}
 		defer tmpFile.Close()
 		size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
+		if err != nil {
+			return 0, err
+		}
 		// Seek back to beginning of the temporary file.
 		if _, err = tmpFile.Seek(0, 0); err != nil {
 			return 0, err
@@ -234,7 +236,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
 		}
 	}
 	// Execute put object.
-	st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
+	st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
 	if err != nil {
 		return 0, err
 	}
@@ -252,7 +254,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,

 // putObjectDo - executes the put object http operation.
 // NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return ObjectInfo{}, err
@@ -269,13 +271,20 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
 		return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
 	}

-	if strings.TrimSpace(contentType) == "" {
-		contentType = "application/octet-stream"
-	}
-
 	// Set headers.
 	customHeader := make(http.Header)
-	customHeader.Set("Content-Type", contentType)
+
+	// Set metadata to headers.
+	for k, v := range metaData {
+		if len(v) > 0 {
+			customHeader.Set(k, v[0])
+		}
+	}
+
+	// If Content-Type is not provided, set the default application/octet-stream one.
+	if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+		customHeader.Set("Content-Type", "application/octet-stream")
+	}

 	// Populate request metadata.
 	reqMetadata := requestMetadata{
@@ -300,13 +309,13 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
 		}
 	}

-	var metadata ObjectInfo
+	var objInfo ObjectInfo
 	// Trim off the odd double quotes from ETag in the beginning and end.
-	metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
+	objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
 	// A success here means data was written to server successfully.
-	metadata.Size = size
+	objInfo.Size = size

 	// Return here.
-	return metadata, nil
+	return objInfo, nil
 }
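`putObjectDo` keeps the small ETag normalization while renaming the result variable. The trim step on its own:

```go
package main

import (
	"fmt"
	"strings"
)

// trimETag strips the literal double quotes that wrap the ETag response
// header, as the lines above do.
func trimETag(raw string) string {
	etag := strings.TrimPrefix(raw, "\"")
	return strings.TrimSuffix(etag, "\"")
}

func main() {
	fmt.Println(trimETag("\"9b2cf535f27731c974343645a3985328\""))
	// 9b2cf535f27731c974343645a3985328
}
```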
133 vendor/src/github.com/minio/minio-go/api-remove.go vendored
@@ -17,6 +17,9 @@
 package minio

 import (
+	"bytes"
+	"encoding/xml"
+	"io"
 	"net/http"
 	"net/url"
 )
@@ -68,12 +71,142 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
 	if err != nil {
 		return err
 	}
+	if resp != nil {
+		// If some unexpected error happened and max retry is reached, we want to let the client know.
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
 	// DeleteObject always responds with http '204' even for
 	// objects which do not exist. So no need to handle them
 	// specifically.
 	return nil
 }

+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	Err        error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the remove multi objects request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+	rmObjects := []deleteObject{}
+	for _, obj := range objects {
+		rmObjects = append(rmObjects, deleteObject{Key: obj})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+	return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the response of the remove multi objects web service
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+		return
+	}
+
+	// Fill deletions that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		errorCh <- RemoveObjectError{
+			ObjectName: obj.Key,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate if bucket name is valid.
+	if err := isValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate objects channel to be properly allocated.
+	if objectsCh == nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: ErrInvalidArgument("Objects channel cannot be nil"),
+		}
+		return errorCh
+	}
+
+	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+	go func(errorCh chan<- RemoveObjectError) {
+		maxEntries := 1000
|
finish := false
|
||||||
|
urlValues := make(url.Values)
|
||||||
|
urlValues.Set("delete", "")
|
||||||
|
|
||||||
|
// Close error channel when Multi delete finishes.
|
||||||
|
defer close(errorCh)
|
||||||
|
|
||||||
|
// Loop over entries by 1000 and call MultiDelete requests
|
||||||
|
for {
|
||||||
|
if finish {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
count := 0
|
||||||
|
var batch []string
|
||||||
|
|
||||||
|
// Try to gather 1000 entries
|
||||||
|
for object := range objectsCh {
|
||||||
|
batch = append(batch, object)
|
||||||
|
if count++; count >= maxEntries {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
// Multi Objects Delete API doesn't accept empty object list, quit immediatly
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if count < maxEntries {
|
||||||
|
// We didn't have 1000 entries, so this is the last batch
|
||||||
|
finish = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate remove multi objects XML request
|
||||||
|
removeBytes := generateRemoveMultiObjectsRequest(batch)
|
||||||
|
// Execute GET on bucket to list objects.
|
||||||
|
resp, err := c.executeMethod("POST", requestMetadata{
|
||||||
|
bucketName: bucketName,
|
||||||
|
queryValues: urlValues,
|
||||||
|
contentBody: bytes.NewReader(removeBytes),
|
||||||
|
contentLength: int64(len(removeBytes)),
|
||||||
|
contentMD5Bytes: sumMD5(removeBytes),
|
||||||
|
contentSHA256Bytes: sum256(removeBytes),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
for _, b := range batch {
|
||||||
|
errorCh <- RemoveObjectError{ObjectName: b, Err: err}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process multiobjects remove xml response
|
||||||
|
processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
|
||||||
|
|
||||||
|
closeResponse(resp)
|
||||||
|
}
|
||||||
|
}(errorCh)
|
||||||
|
return errorCh
|
||||||
|
}
|
||||||
|
|
||||||
// RemoveIncompleteUpload aborts an partially uploaded object.
|
// RemoveIncompleteUpload aborts an partially uploaded object.
|
||||||
// Requires explicit authentication, no anonymous requests are allowed for multipart API.
|
// Requires explicit authentication, no anonymous requests are allowed for multipart API.
|
||||||
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
|
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
|
||||||
|
|
|
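The new RemoveObjects API is channel-driven: the caller streams object names into a channel, the SDK batches them into Multi-Object Delete requests of up to 1000 entries, and per-object failures come back on the returned error channel. A minimal usage sketch (the endpoint, credentials, bucket and object names are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Endpoint and credentials are placeholders.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Stream the object names to delete into a channel.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"obj-1", "obj-2", "obj-3"} {
			objectsCh <- name
		}
	}()

	// Drain the error channel; it is closed when all batches finish.
	for e := range client.RemoveObjects("my-bucket", objectsCh) {
		log.Println("failed to remove", e.ObjectName, ":", e.Err)
	}
}
```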
@@ -206,3 +206,39 @@ type createBucketConfiguration struct {
 	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
 	Location string   `xml:"LocationConstraint"`
 }
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+	Key       string
+	VersionID string `xml:"VersionId,omitempty"`
+	// These fields are ignored.
+	DeleteMarker          bool
+	DeleteMarkerVersionID string
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+	Key     string
+	Code    string
+	Message string
+}
+
+// deletedMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+	XMLName xml.Name       `xml:"Delete"`
+	Quiet   bool
+	Objects []deleteObject `xml:"Object"`
+}
+
+// deletedMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+	XMLName          xml.Name           `xml:"DeleteResult"`
+	DeletedObjects   []deletedObject    `xml:"Deleted"`
+	UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
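These structs map directly onto the S3 Multi-Object Delete wire format via struct tags. A standalone sketch of the XML they produce (the two request struct definitions are copied from the diff above; the object keys are illustrative):

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteMultiObjects struct {
	XMLName xml.Name       `xml:"Delete"`
	Quiet   bool
	Objects []deleteObject `xml:"Object"`
}

func main() {
	req := deleteMultiObjects{
		Quiet:   true,
		Objects: []deleteObject{{Key: "obj-1"}, {Key: "obj-2"}},
	}
	out, err := xml.Marshal(req)
	if err != nil {
		panic(err)
	}
	// Prints: <Delete><Quiet>true</Quiet><Object><Key>obj-1</Key></Object>...</Delete>
	fmt.Println(string(out))
}
```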
54 vendor/src/github.com/minio/minio-go/api-stat.go vendored

@@ -21,6 +21,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )

 // BucketExists verify if bucket exists and you have permission to access it.
@@ -49,6 +51,31 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
 	return true, nil
 }

+// List of header keys to be filtered, usually
+// from all S3 API http responses.
+var defaultFilterKeys = []string{
+	"Transfer-Encoding",
+	"Accept-Ranges",
+	"Date",
+	"Server",
+	"Vary",
+	"x-amz-request-id",
+	"x-amz-id-2",
+	// Add new headers to be ignored.
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+	filterKeys := append([]string{
+		"ETag",
+		"Content-Length",
+		"Last-Modified",
+		"Content-Type",
+	}, defaultFilterKeys...)
+	return filterHeader(header, filterKeys)
+}
+
 // StatObject verifies if object exists and you have permission to access.
 func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 	// Input validation.
@@ -78,8 +105,11 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
 	md5sum = strings.TrimSuffix(md5sum, "\"")

+	// Content-Length is not valid for Google Cloud Storage, do not verify.
+	var size int64 = -1
+	if !s3utils.IsGoogleEndpoint(c.endpointURL) {
 		// Parse content length.
-	size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+		size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
 		if err != nil {
 			return ObjectInfo{}, ErrorResponse{
 				Code: "InternalError",
@@ -91,6 +121,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 				Region: resp.Header.Get("x-amz-bucket-region"),
 			}
 		}
+	}
 	// Parse Last-Modified has http time format.
 	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 	if err != nil {
@@ -109,12 +140,19 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
 	if contentType == "" {
 		contentType = "application/octet-stream"
 	}

+	// Extract only the relevant header keys describing the object.
+	// following function filters out a list of standard set of keys
+	// which are not part of object metadata.
+	metadata := extractObjMetadata(resp.Header)
+
 	// Save object metadata info.
-	var objectStat ObjectInfo
-	objectStat.ETag = md5sum
-	objectStat.Key = objectName
-	objectStat.Size = size
-	objectStat.LastModified = date
-	objectStat.ContentType = contentType
-	return objectStat, nil
+	return ObjectInfo{
+		ETag:         md5sum,
+		Key:          objectName,
+		Size:         size,
+		LastModified: date,
+		ContentType:  contentType,
+		Metadata:     metadata,
+	}, nil
 }
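With this change, StatObject exposes the filtered response headers through the new Metadata field on ObjectInfo. A usage sketch (endpoint, credentials and names are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	info, err := client.StatObject("my-bucket", "my-object")
	if err != nil {
		log.Fatalln(err)
	}
	// Standard fields, plus the newly exposed filtered headers.
	log.Println(info.Key, info.Size, info.ContentType)
	for k, v := range info.Metadata {
		log.Println("header:", k, "=", v)
	}
}
```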
126 vendor/src/github.com/minio/minio-go/api.go vendored

@@ -33,12 +33,18 @@ import (
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
 )

 // Client implements Amazon S3 compatible methods.
 type Client struct {
 	/// Standard options.
+
+	// Parsed endpoint url provided by the user.
+	endpointURL url.URL
+
 	// AccessKeyID required for authorized requests.
 	accessKeyID string
 	// SecretAccessKey required for authorized requests.
@@ -53,7 +59,6 @@ type Client struct {
 		appName    string
 		appVersion string
 	}
-	endpointURL string

 	// Indicate whether we are using https or not
 	secure bool
@@ -66,6 +71,9 @@ type Client struct {
 	isTraceEnabled bool
 	traceOutput    io.Writer

+	// S3 specific accelerated endpoint.
+	s3AccelerateEndpoint string
+
 	// Random seed.
 	random *rand.Rand
 }
@@ -73,7 +81,7 @@
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "2.0.1"
+	libraryVersion = "2.0.4"
 )

 // User Agent should always following the below style.
@@ -116,13 +124,12 @@ func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Cl
 	if err != nil {
 		return nil, err
 	}
-	// Google cloud storage should be set to signature V2, force it if
-	// not.
-	if isGoogleEndpoint(clnt.endpointURL) {
+	// Google cloud storage should be set to signature V2, force it if not.
+	if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
 		clnt.signature = SignatureV2
 	}
 	// If Amazon S3 set to signature v2.n
-	if isAmazonEndpoint(clnt.endpointURL) {
+	if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
 		clnt.signature = SignatureV4
 	}
 	return clnt, nil
@@ -151,6 +158,18 @@ func (r *lockedRandSource) Seed(seed int64) {
 	r.lk.Unlock()
 }

+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+	if len(via) == 0 {
+		return nil
+	}
+	for key, val := range via[0].Header {
+		req.Header[key] = val
+	}
+	return nil
+}
+
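Before Go 1.8, http.Client drops request headers when it follows a redirect, so authenticated requests that hit a 3xx would arrive unsigned. Wiring a CheckRedirect hook like the one added above preserves them. A standalone sketch (the URL and token are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

// Copy all headers from the original request onto the redirected one,
// mirroring the redirectHeaders helper added in this commit.
func redirectHeaders(req *http.Request, via []*http.Request) error {
	if len(via) == 0 {
		return nil
	}
	for key, val := range via[0].Header {
		req.Header[key] = val
	}
	return nil
}

func main() {
	client := &http.Client{
		Transport:     http.DefaultTransport,
		CheckRedirect: redirectHeaders,
	}
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.Header.Set("Authorization", "Bearer placeholder-token")
	resp, err := client.Do(req) // headers now survive redirect hops
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```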
 func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
 	// construct endpoint.
 	endpointURL, err := getEndpointURL(endpoint, secure)
@@ -170,11 +189,12 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
 	clnt.secure = secure

 	// Save endpoint URL, user agent for future uses.
-	clnt.endpointURL = endpointURL.String()
+	clnt.endpointURL = *endpointURL

 	// Instantiate http client and bucket location cache.
 	clnt.httpClient = &http.Client{
 		Transport: http.DefaultTransport,
+		CheckRedirect: redirectHeaders,
 	}

 	// Instantiae bucket location cache.
@@ -189,8 +209,7 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl

 // SetAppInfo - add application details to user agent.
 func (c *Client) SetAppInfo(appName string, appVersion string) {
-	// if app name and version is not set, we do not a new user
-	// agent.
+	// if app name and version not set, we do not set a new user agent.
 	if appName != "" && appVersion != "" {
 		c.appInfo = struct {
 			appName    string
@@ -241,8 +260,18 @@ func (c *Client) TraceOff() {
 	c.isTraceEnabled = false
 }

-// requestMetadata - is container for all the values to make a
-// request.
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is only specific to S3 for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please vist -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+	if s3utils.IsAmazonEndpoint(c.endpointURL) {
+		c.s3AccelerateEndpoint = accelerateEndpoint
+	}
+}
+
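Transfer acceleration is opt-in per client and silently ignored for non-Amazon endpoints. A usage sketch (the accelerate endpoint is the one AWS documents; the credentials are placeholders):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Route subsequent requests through the S3 accelerated endpoint.
	// This is a no-op for non-Amazon endpoints.
	client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
}
```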
+// requestMetadata - is container for all the values to make a request.
 type requestMetadata struct {
 	// If set newRequest presigns the URL.
 	presignURL bool
@@ -262,6 +291,12 @@ type requestMetadata struct {
 	contentMD5Bytes []byte
 }

+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regCred matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
 // Filter out signature value from Authorization header.
 func (c Client) filterSignature(req *http.Request) {
 	// For anonymous requests, no need to filter.
@@ -281,11 +316,9 @@ func (c Client) filterSignature(req *http.Request) {
 	origAuth := req.Header.Get("Authorization")
 	// Strip out accessKeyID from:
 	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
-	regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
 	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")

 	// Strip out 256-bit signature from: Signature=<256-bit signature>
-	regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
 	newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")

 	// Set a temporary redacted auth
@@ -364,8 +397,12 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {

 // do - execute http request.
 func (c Client) do(req *http.Request) (*http.Response, error) {
-	// do the request.
-	resp, err := c.httpClient.Do(req)
+	var resp *http.Response
+	var err error
+	// Do the request in a loop in case of 307 http is met since golang still doesn't
+	// handle properly this situation (https://github.com/golang/go/issues/7912)
+	for {
+		resp, err = c.httpClient.Do(req)
 		if err != nil {
 			// Handle this specifically for now until future Golang
 			// versions fix this issue properly.
@@ -379,6 +416,17 @@ func (c Client) do(req *http.Request) (*http.Response, error) {
 			}
 			return nil, err
 		}
+		// Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
+		if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
+			newURL, err := url.Parse(resp.Header.Get("Location"))
+			if err != nil {
+				break
+			}
+			req.URL = newURL
+		} else {
+			break
+		}
+	}

 	// Response cannot be non-nil, report if its the case.
 	if resp == nil {
@@ -467,6 +515,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt

 		// Read the body to be saved later.
 		errBodyBytes, err := ioutil.ReadAll(res.Body)
+		// res.Body should be closed
+		closeResponse(res)
 		if err != nil {
 			return nil, err
 		}
@@ -512,7 +562,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R

 	// Default all requests to "us-east-1" or "cn-north-1" (china region)
 	location := "us-east-1"
-	if isAmazonChinaEndpoint(c.endpointURL) {
+	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
 		// For china specifically we need to set everything to
 		// cn-north-1 for now, there is no easier way until AWS S3
 		// provides a cleaner compatible API across "us-east-1" and
@@ -550,10 +600,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 	}
 	if c.signature.isV2() {
 		// Presign URL with signature v2.
-		req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
+		req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
 	} else {
 		// Presign URL with signature v4.
-		req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+		req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
 	}
 	return req, nil
 }
@@ -563,10 +613,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 		req.Body = ioutil.NopCloser(metadata.contentBody)
 	}

-	// FIXEM: Enable this when Google Cloud Storage properly supports 100-continue.
+	// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
 	// Skip setting 'expect' header for Google Cloud Storage, there
 	// are some known issues - https://github.com/restic/restic/issues/520
-	if !isGoogleEndpoint(c.endpointURL) {
+	if !s3utils.IsGoogleEndpoint(c.endpointURL) && c.s3AccelerateEndpoint == "" {
 		// Set 'Expect' header for the request.
 		req.Header.Set("Expect", "100-continue")
 	}
@@ -610,10 +660,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
 	if !c.anonymous {
 		if c.signature.isV2() {
 			// Add signature version '2' authorization header.
-			req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+			req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
 		} else if c.signature.isV4() {
 			// Add signature version '4' authorization header.
-			req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
+			req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
 		}
 	}

@@ -631,26 +681,34 @@ func (c Client) setUserAgent(req *http.Request) {

 // makeTargetURL make a new target url.
 func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
-	// Save host.
-	url, err := url.Parse(c.endpointURL)
-	if err != nil {
-		return nil, err
-	}
-	host := url.Host
+	host := c.endpointURL.Host
 	// For Amazon S3 endpoint, try to fetch location based endpoint.
-	if isAmazonEndpoint(c.endpointURL) {
+	if s3utils.IsAmazonEndpoint(c.endpointURL) {
+		if c.s3AccelerateEndpoint != "" && bucketName != "" {
+			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+			// Disable transfer acceleration for non-compliant bucket names.
+			if strings.Contains(bucketName, ".") {
+				return nil, ErrTransferAccelerationBucket(bucketName)
+			}
+			// If transfer acceleration is requested set new host.
+			// For more details about enabling transfer acceleration read here.
+			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+			host = c.s3AccelerateEndpoint
+		} else {
 			// Fetch new host based on the bucket location.
 			host = getS3Endpoint(bucketLocation)
 		}
+	}

 	// Save scheme.
-	scheme := url.Scheme
+	scheme := c.endpointURL.Scheme

 	urlStr := scheme + "://" + host + "/"
 	// Make URL only if bucketName is available, otherwise use the
 	// endpoint URL.
 	if bucketName != "" {
 		// Save if target url will have buckets which suppport virtual host.
-		isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
+		isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)

 		// If endpoint supports virtual host style use that always.
 		// Currently only S3 and Google Cloud Storage would support
@@ -658,19 +716,19 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
 		if isVirtualHostStyle {
 			urlStr = scheme + "://" + bucketName + "." + host + "/"
 			if objectName != "" {
-				urlStr = urlStr + urlEncodePath(objectName)
+				urlStr = urlStr + s3utils.EncodePath(objectName)
 			}
 		} else {
 			// If not fall back to using path style.
 			urlStr = urlStr + bucketName + "/"
 			if objectName != "" {
-				urlStr = urlStr + urlEncodePath(objectName)
+				urlStr = urlStr + s3utils.EncodePath(objectName)
 			}
 		}
 	}
 	// If there are any query values, add them to the end.
 	if len(queryValues) > 0 {
-		urlStr = urlStr + "?" + queryEncode(queryValues)
+		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
 	}
 	u, err := url.Parse(urlStr)
 	if err != nil {
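The credential and signature regexes are now compiled once at package scope instead of on every traced request. A standalone sketch of the redaction they perform, using the exact patterns from the diff (the Authorization value is fabricated):

```go
package main

import (
	"fmt"
	"regexp"
)

// Package-level compilation, as in the diff above, avoids recompiling
// the patterns each time a request is dumped.
var (
	regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
	regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
)

func main() {
	auth := "AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20170101/us-east-1/s3/aws4_request, Signature=abc123def"
	auth = regCred.ReplaceAllString(auth, "Credential=**REDACTED**/")
	auth = regSign.ReplaceAllString(auth, "Signature=**REDACTED**")
	fmt.Println(auth) // access key and signature are masked
}
```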
@@ -18,7 +18,6 @@ package minio

 import (
 	"bytes"
-	crand "crypto/rand"
 	"errors"
 	"io"
 	"io/ioutil"
@@ -43,10 +42,10 @@ func TestMakeBucketErrorV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
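The tests now read the target server from the environment instead of hard-coding s3.amazonaws.com, so they can run against a local Minio. A sketch of the pattern with a plausible mustParseBool helper; the helper's actual implementation in the test suite is not shown in this diff and may differ:

```go
package main

import (
	"log"
	"os"
	"strconv"

	"github.com/minio/minio-go"
)

// mustParseBool is assumed here to be a thin wrapper over strconv.ParseBool.
func mustParseBool(s string) bool {
	b, err := strconv.ParseBool(s)
	if err != nil {
		return false
	}
	return b
}

func main() {
	// S3_ADDRESS, ACCESS_KEY, SECRET_KEY and S3_SECURE select the
	// server under test, e.g. a local Minio instead of AWS S3.
	client, err := minio.NewV2(
		os.Getenv("S3_ADDRESS"),
		os.Getenv("ACCESS_KEY"),
		os.Getenv("SECRET_KEY"),
		mustParseBool(os.Getenv("S3_SECURE")),
	)
	if err != nil {
		log.Fatalln(err)
	}
	_ = client
}
```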
@@ -89,10 +88,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -113,13 +112,8 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
 		t.Fatal("Error:", err, bucketName)
 	}

-	// Generate data more than 32K
-	buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error:", err)
-	}
+	// Generate data more than 32K.
+	buf := bytes.Repeat([]byte("h"), rand.Intn(1<<20)+32*1024)

 	// Save the data
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -174,10 +168,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -198,15 +192,18 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
 		t.Fatal("Error:", err, bucketName)
 	}

+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
 	reader, writer := io.Pipe()
 	go func() {
 		i := 0
 		for i < 25 {
-			_, err = io.CopyN(writer, crand.Reader, 128*1024)
+			_, err = io.CopyN(writer, r, 128*1024)
 			if err != nil {
 				t.Fatal("Error:", err, bucketName)
 			}
 			i++
+			r.Seek(0, 0)
 		}
 		writer.CloseWithError(errors.New("Proactively closed to be verified later."))
 	}()
@@ -241,10 +238,10 @@ func TestResumablePutObjectV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -271,8 +268,9 @@ func TestResumablePutObjectV2(t *testing.T) {
 		t.Fatal("Error:", err)
 	}

+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
 	// Copy 11MiB worth of random data.
-	n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
+	n, err := io.CopyN(file, r, 11*1024*1024)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
@@ -352,10 +350,10 @@ func TestFPutObjectV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -382,7 +380,8 @@ func TestFPutObjectV2(t *testing.T) {
 		t.Fatal("Error:", err)
 	}

-	n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+	n, err := io.CopyN(file, r, 11*1024*1024)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
@@ -500,10 +499,10 @@ func TestResumableFPutObjectV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -529,7 +528,8 @@ func TestResumableFPutObjectV2(t *testing.T) {
 		t.Fatal("Error:", err)
 	}

-	n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+	n, err := io.CopyN(file, r, 11*1024*1024)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
@@ -577,10 +577,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -628,10 +628,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -652,15 +652,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 		t.Fatal("Error:", err, bucketName)
 	}

-	// Generate data more than 32K
-	buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error:", err)
-	}
-
-	// Save the data
+	// Generate data more than 32K.
+	buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
+
+	// Save the data.
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
 	if err != nil {
@@ -716,7 +711,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
 	}

 	var buffer1 bytes.Buffer
-	if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
 		if err != io.EOF {
 			t.Fatal("Error:", err)
 		}
@@ -766,10 +761,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {

 	// Instantiate new minio client object.
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -791,12 +786,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
 	}

 	// Generate data more than 32K
-	buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error:", err)
-	}
+	buf := bytes.Repeat([]byte("8"), rand.Intn(1<<20)+32*1024)

 	// Save the data
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -907,10 +897,10 @@ func TestCopyObjectV2(t *testing.T) {

 	// Instantiate new minio client object
 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -938,12 +928,7 @@ func TestCopyObjectV2(t *testing.T) {
 	}

 	// Generate data more than 32K
-	buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error:", err)
-	}
+	buf := bytes.Repeat([]byte("9"), rand.Intn(1<<20)+32*1024)

 	// Save the data
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@@ -958,7 +943,7 @@ func TestCopyObjectV2(t *testing.T) {
 	}

 	// Set copy conditions.
-	copyConds := NewCopyConditions()
+	copyConds := CopyConditions{}
 	err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -1029,10 +1014,10 @@ func TestFunctionalV2(t *testing.T) {
 	rand.Seed(time.Now().Unix())

 	c, err := NewV2(
-		"s3.amazonaws.com",
+		os.Getenv("S3_ADDRESS"),
 		os.Getenv("ACCESS_KEY"),
 		os.Getenv("SECRET_KEY"),
-		true,
+		mustParseBool(os.Getenv("S3_SECURE")),
 	)
 	if err != nil {
 		t.Fatal("Error:", err)
@@ -1109,11 +1094,7 @@ func TestFunctionalV2(t *testing.T) {
 	objectName := bucketName + "unique"

 	// Generate data
-	buf := make([]byte, rand.Intn(1<<19))
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error: ", err)
-	}
+	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))

 	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
 	if err != nil {
@@ -1243,11 +1224,9 @@ func TestFunctionalV2(t *testing.T) {
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
-	buf = make([]byte, rand.Intn(1<<20))
-	_, err = io.ReadFull(crand.Reader, buf)
-	if err != nil {
-		t.Fatal("Error: ", err)
-	}
+	// Generate data more than 32K
+	buf = bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
+
 	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
 	if err != nil {
 		t.Fatal("Error: ", err)
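Replacing crypto/rand with bytes.Repeat makes the test fixtures deterministic and cheap to generate. A standalone sketch of the reusable-reader pattern from the pipe test above, rewinding one block instead of drawing fresh random bytes per chunk:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	// One deterministic 128KiB block, reused for every chunk.
	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))

	var total int64
	for i := 0; i < 25; i++ {
		n, err := io.CopyN(ioutil.Discard, r, 128*1024)
		if err != nil {
			panic(err)
		}
		total += n
		// Rewind so the next iteration reads the same block again.
		r.Seek(0, 0)
	}
	fmt.Println("copied", total, "bytes")
}
```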
File diff suppressed because it is too large
@@ -18,11 +18,9 @@ package minio

 import (
 	"bytes"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
-	"net/url"
 	"os"
 	"strings"
 	"testing"
@@ -202,49 +200,6 @@ func TestTempFile(t *testing.T) {
 	}
 }

-// Tests url encoding.
-func TestEncodeURL2Path(t *testing.T) {
-	type urlStrings struct {
-		objName        string
-		encodedObjName string
-	}
-
-	bucketName := "bucketName"
-	want := []urlStrings{
-		{
-			objName:        "本語",
-			encodedObjName: "%E6%9C%AC%E8%AA%9E",
-		},
-		{
-			objName:        "本語.1",
-			encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
-		},
-		{
-			objName:        ">123>3123123",
-			encodedObjName: "%3E123%3E3123123",
-		},
-		{
-			objName:        "test 1 2.txt",
-			encodedObjName: "test%201%202.txt",
-		},
-		{
-			objName:        "test++ 1.txt",
-			encodedObjName: "test%2B%2B%201.txt",
-		},
-	}
-
-	for _, o := range want {
-		u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
-		if err != nil {
-			t.Fatal("Error:", err)
-		}
-		urlPath := "/" + bucketName + "/" + o.encodedObjName
-		if urlPath != encodeURL2Path(u) {
-			t.Fatal("Error")
-		}
-	}
-}
-
 // Tests error response structure.
 func TestErrorResponse(t *testing.T) {
 	var err error
@@ -270,53 +225,6 @@ func TestErrorResponse(t *testing.T) {
 	}
 }

-// Tests signature calculation.
-func TestSignatureCalculation(t *testing.T) {
-	req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
-	if err != nil {
-		t.Fatal("Error:", err)
-	}
-	req = signV4(*req, "", "", "us-east-1")
-	if req.Header.Get("Authorization") != "" {
-		t.Fatal("Error: anonymous credentials should not have Authorization header.")
-	}
-
-	req = preSignV4(*req, "", "", "us-east-1", 0)
-	if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
-		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
-	}
-
-	req = signV2(*req, "", "")
-	if req.Header.Get("Authorization") != "" {
-		t.Fatal("Error: anonymous credentials should not have Authorization header.")
-	}
-
-	req = preSignV2(*req, "", "", 0)
-	if strings.Contains(req.URL.RawQuery, "Signature") {
-		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
-	}
-
-	req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
-	if req.Header.Get("Authorization") == "" {
-		t.Fatal("Error: normal credentials should have Authorization header.")
-	}
-
-	req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
-	if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
-		t.Fatal("Error: normal credentials should have Signature query resource.")
-	}
-
-	req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
-	if req.Header.Get("Authorization") == "" {
-		t.Fatal("Error: normal credentials should have Authorization header.")
-	}
-
-	req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
-	if !strings.Contains(req.URL.RawQuery, "Signature") {
-		t.Fatal("Error: normal credentials should not have Signature query resource.")
-	}
-}
-
 // Tests signature type.
 func TestSignatureType(t *testing.T) {
 	clnt := Client{}
@@ -354,11 +262,11 @@ func TestBucketPolicyTypes(t *testing.T) {

 // Tests optimal part size.
 func TestPartSize(t *testing.T) {
-	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5000000000000000000)
+	_, _, _, err := optimalPartInfo(5000000000000000000)
 	if err == nil {
 		t.Fatal("Error: should fail")
 	}
-	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5497558138880)
+	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880)
 	if err != nil {
 		t.Fatal("Error: ", err)
 	}
@@ -371,7 +279,7 @@ func TestPartSize(t *testing.T) {
 	if lastPartSize != 134217728 {
 		t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
 	}
-	totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000)
+	_, partSize, _, err = optimalPartInfo(5000000000)
 	if err != nil {
 		t.Fatal("Error:", err)
 	}
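The TestPartSize rewrite discards unused return values with the blank identifier, so every variable that remains declared is actually read. A minimal illustration of the pattern; split here is a hypothetical stand-in for a multi-return function such as optimalPartInfo:

```go
package main

import "fmt"

// split is a hypothetical stand-in for a multi-return function: it
// returns a part count, a part size and an error.
func split(total int64) (int64, int64, error) {
	if total <= 0 {
		return 0, 0, fmt.Errorf("invalid total: %d", total)
	}
	return total / 1024, 1024, nil
}

func main() {
	// Only the error matters here, so blank out the other results.
	_, _, err := split(-1)
	fmt.Println("expected failure:", err)

	// Later calls keep just the values they assert on.
	count, _, err := split(4096)
	if err != nil {
		panic(err)
	}
	fmt.Println("parts:", count)
}
```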
@@ -23,6 +23,9 @@ import (
 	"path"
 	"strings"
 	"sync"
+
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
 )

 // bucketLocationCache - Provides simple mechanism to hold bucket
@@ -85,7 +88,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
 		return location, nil
 	}

-	if isAmazonChinaEndpoint(c.endpointURL) {
+	if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
 		// For china specifically we need to set everything to
 		// cn-north-1 for now, there is no easier way until AWS S3
 		// provides a cleaner compatible API across "us-east-1" and
@@ -160,10 +163,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
 	urlValues.Set("location", "")

 	// Set get bucket location always as path style.
-	targetURL, err := url.Parse(c.endpointURL)
-	if err != nil {
-		return nil, err
-	}
+	targetURL := c.endpointURL
 	targetURL.Path = path.Join(bucketName, "") + "/"
 	targetURL.RawQuery = urlValues.Encode()

@@ -189,9 +189,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro

 	// Sign the request.
 	if c.signature.isV4() {
-		req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+		req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
 	} else if c.signature.isV2() {
-		req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+		req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
 	}
 	return req, nil
 }
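Because endpointURL is now a url.URL value rather than a string, `targetURL := c.endpointURL` copies the struct, so the per-request Path and RawQuery mutations cannot leak back into the client. A small demonstration of that value-copy semantics (the host is a placeholder):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	endpoint := url.URL{Scheme: "https", Host: "play.minio.io:9000"}

	// Value assignment copies the struct; mutating the copy leaves
	// the original endpoint untouched.
	target := endpoint
	target.Path = "/my-bucket/"
	target.RawQuery = "location="

	fmt.Println(endpoint.String()) // https://play.minio.io:9000
	fmt.Println(target.String())   // https://play.minio.io:9000/my-bucket/?location=
}
```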
@@ -26,6 +26,8 @@ import (
 	"path"
 	"reflect"
 	"testing"
+
+	"github.com/minio/minio-go/pkg/s3signer"
 )

 // Test validates `newBucketLocationCache`.
@@ -70,14 +72,12 @@ func TestGetBucketLocationRequest(t *testing.T) {
 	urlValues.Set("location", "")

 	// Set get bucket location always as path style.
-	targetURL, err := url.Parse(c.endpointURL)
-	if err != nil {
-		return nil, err
-	}
+	targetURL := c.endpointURL
 	targetURL.Path = path.Join(bucketName, "") + "/"
 	targetURL.RawQuery = urlValues.Encode()

 	// Get a new HTTP request for the method.
+	var err error
 	req, err = http.NewRequest("GET", targetURL.String(), nil)
 	if err != nil {
 		return nil, err
@@ -93,9 +93,9 @@ func TestGetBucketLocationRequest(t *testing.T) {

 	// Sign the request.
 	if c.signature.isV4() {
-		req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
+		req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
 	} else if c.signature.isV2() {
-		req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+		req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
 	}
 	return req, nil
@@ -84,7 +84,7 @@ func (arn Arn) String() string {
 // NotificationConfig - represents one single notification configuration
 // such as topic, queue or lambda configuration.
 type NotificationConfig struct {
-	Id     string                  `xml:"Id,omitempty"`
+	ID     string                  `xml:"Id,omitempty"`
 	Arn    Arn                     `xml:"-"`
 	Events []NotificationEventType `xml:"Event"`
 	Filter *Filter                 `xml:"Filter,omitempty"`
@@ -18,7 +18,7 @@ package minio

 /// Multipart upload defaults.

-// miniPartSize - minimum part size 5MiB per object after which
+// miniPartSize - minimum part size 64MiB per object after which
 // putObject behaves internally as multipart.
 const minPartSize = 1024 * 1024 * 64
@@ -44,3 +44,9 @@ const optimalReadBufferSize = 1024 * 1024 * 5
 // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
 // we don't want to sign the request payload
 const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Signature related constants.
+const (
+	signV4Algorithm   = "AWS4-HMAC-SHA256"
+	iso8601DateFormat = "20060102T150405Z"
+)
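iso8601DateFormat is a Go reference-time layout, not a literal date. A quick demonstration of what it produces (the time value is arbitrary):

```go
package main

import (
	"fmt"
	"time"
)

const iso8601DateFormat = "20060102T150405Z"

func main() {
	t := time.Date(2017, time.January, 2, 15, 4, 5, 0, time.UTC)
	// Go layouts use the reference time Mon Jan 2 15:04:05 2006.
	fmt.Println(t.Format(iso8601DateFormat)) // 20170102T150405Z
}
```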
|
@ -41,11 +41,13 @@ type CopyConditions struct {
|
||||||
conditions []copyCondition
|
conditions []copyCondition
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewCopyConditions - Instantiate new list of conditions.
|
// NewCopyConditions - Instantiate new list of conditions. This
|
||||||
|
// function is left behind for backward compatibility. The idiomatic
|
||||||
|
// way to set an empty set of copy conditions is,
|
||||||
|
// ``copyConditions := CopyConditions{}``.
|
||||||
|
//
|
||||||
func NewCopyConditions() CopyConditions {
|
func NewCopyConditions() CopyConditions {
|
||||||
return CopyConditions{
|
return CopyConditions{}
|
||||||
conditions: make([]copyCondition, 0),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetMatchETag - set match etag.
|
// SetMatchETag - set match etag.
|
||||||
|
|
503 vendor/src/github.com/minio/minio-go/docs/API.md vendored
@@ -1,4 +1,4 @@
-# Golang Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Minio Go Client API Reference [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)

## Initialize Minio Client object.

@@ -54,21 +54,22 @@ func main() {

```

-| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations |
+| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations | Client custom settings |
-|:---|:---|:---|:---|
+|:---|:---|:---|:---|:---|
-|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) |
+|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
-|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) |
+|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`SetBucketNotification`](#SetBucketNotification) |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`GetBucketNotification`](#GetBucketNotification) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`DeleteBucketNotification`](#DeleteBucketNotification) |
+|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | |
+|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
-|[`ListIncompleteUploads`](#ListIncompleteUploads) |[`FPutObject`](#FPutObject) | | |
+|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
+| | [`FPutObject`](#FPutObject) | | |
| | [`FGetObject`](#FGetObject) | | |

## 1. Constructor
<a name="Minio"></a>

-### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*Client, error)
+### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error)
Initializes a new client object.

__Parameters__

@@ -76,16 +77,16 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`endpoint` | _string_ |S3 object storage endpoint. |
+|`endpoint` | _string_ |S3 compatible object storage endpoint |
-| `accessKeyID` |_string_ | Access key for the object storage endpoint. |
+|`accessKeyID` |_string_ |Access key for the object storage |
-| `secretAccessKey` | _string_ |Secret key for the object storage endpoint. |
+|`secretAccessKey` | _string_ |Secret key for the object storage |
-|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |


## 2. Bucket operations

<a name="MakeBucket"></a>
-### MakeBucket(bucketName string, location string) error
+### MakeBucket(bucketName, location string) error
Creates a new bucket.


@@ -93,8 +94,8 @@ __Parameters__

| Param | Type | Description |
|---|---|---|
-|`bucketName` | _string_ | Name of the bucket. |
+|`bucketName` | _string_ | Name of the bucket |
-| `location` | _string_ | Default value is us-east-1 Region where the bucket is created. Valid values are listed below:|
+| `location` | _string_ | Region where the bucket is to be created. Default value is us-east-1. Other valid values are listed below. Note: When used with minio server, use the region specified in its config file (defaults to us-east-1).|
| | |us-east-1 |
| | |us-west-1 |
| | |us-west-2 |

@@ -127,22 +128,22 @@ Lists all buckets.

| Param | Type | Description |
|---|---|---|
-|`bucketList` | _[]BucketInfo_ | Lists bucket in following format shown below: |
+|`bucketList` | _[]BucketInfo_ | Lists of all buckets |


| Param | Type | Description |
|---|---|---|
-|`bucket.Name` | _string_ | bucket name. |
+|`bucket.Name` | _string_ | Name of the bucket |
-|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |
+|`bucket.CreationDate` | _time.Time_ | Date of bucket creation |


__Example__


```go

buckets, err := minioClient.ListBuckets()
if err != nil {
	fmt.Println(err)
	return
}

@@ -150,10 +151,10 @@ for _, bucket := range buckets {
	fmt.Println(bucket)
}

```

<a name="BucketExists"></a>
-### BucketExists(bucketName string) error
+### BucketExists(bucketName string) (found bool, err error)

Checks if a bucket exists.

@@ -162,7 +163,15 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |


+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`found` | _bool_ | Indicates whether bucket exists or not |
+|`err` | _error_ | Standard Error |


__Example__

@@ -170,11 +179,14 @@ __Example__

```go

-err := minioClient.BucketExists("mybucket")
+found, err := minioClient.BucketExists("mybucket")
if err != nil {
	fmt.Println(err)
	return
}
+if found {
+	fmt.Println("Bucket found")
+}

```

@@ -188,7 +200,7 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |

__Example__

@@ -204,7 +216,7 @@ if err != nil {
```

<a name="ListObjects"></a>
-### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo

Lists objects in a bucket.

@@ -213,24 +225,24 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+|`objectPrefix` |_string_ | Prefix of objects to be listed |
-| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
+|`recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjects iterator. |


__Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
+|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |

|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Key` | _string_ |Name of the object |
-|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.Size` | _int64_ |Size of the object |
-|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |


```go

@@ -255,19 +267,19 @@ for object := range objectCh {


<a name="ListObjectsV2"></a>
-### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo

-Lists objects in a bucket using the recommanded listing API v2
+Lists objects in a bucket using the recommended listing API v2

__Parameters__


|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-| `objectPrefix` |_string_ | the prefix of the objects that should be listed. |
+| `objectPrefix` |_string_ | Prefix of objects to be listed |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjectsV2 iterator. |


__Return Value__

@@ -278,10 +290,10 @@ __Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Key` | _string_ |Name of the object |
-|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.Size` | _int64_ |Size of the object |
-|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
-|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |


```go

@@ -305,7 +317,7 @@ for object := range objectCh {
```

<a name="ListIncompleteUploads"></a>
-### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
+### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo

Lists partially uploaded objects in a bucket.

@@ -315,25 +327,25 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-| `prefix` |_string_ | prefix of the object names that are partially uploaded |
+| `prefix` |_string_ | Prefix of objects that are partially uploaded |
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
-|`doneCh` | _chan struct{}_ | Set this value to 'true' to enable secure (HTTPS) access. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenIncompleteUploads iterator. |


__Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |
+|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |

__Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
+|`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object |
-|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
+|`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object |
-|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|
+|`multiPartObjInfo.Size` | _int64_ |Size of incompletely uploaded object |

__Example__

@@ -361,7 +373,7 @@ for multiPartObject := range multiPartObjectCh {
## 3. Object operations

<a name="GetObject"></a>
-### GetObject(bucketName string, objectName string) (*Object, error)
+### GetObject(bucketName, objectName string) (*Object, error)

Downloads an object.

@@ -371,8 +383,8 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |


__Return Value__

@@ -380,7 +392,7 @@ __Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`object` | _*minio.Object_ |_minio.Object_ represents object reader |
+|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. |


__Example__

@@ -406,7 +418,7 @@ if _, err = io.Copy(localFile, object); err != nil {
```

<a name="FGetObject"></a>
-### FGetObject(bucketName string, objectName string, filePath string) error
+### FGetObject(bucketName, objectName, filePath string) error
Downloads and saves the object as a file in the local filesystem.


@@ -415,9 +427,9 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |
-|`filePath` | _string_ |path to which the object data will be written to. |
+|`filePath` | _string_ |Path to download object to |


__Example__

@@ -434,7 +446,7 @@ if err != nil {
```

<a name="PutObject"></a>
-### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
+### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error)

Uploads an object.

@@ -444,16 +456,16 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |
-|`reader` | _io.Reader_ |Any golang object implementing io.Reader. |
+|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
-|`contentType` | _string_ |content type of the object. |
+|`contentType` | _string_ |Content type of the object |


__Example__


-Uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, PutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

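The PutObject example body itself is elided by the surrounding hunks; as orientation, a minimal sketch against the signature shown above (assumes a `minioClient` initialized as in the constructor section; bucket, object, and file names are hypothetical; imports: os, fmt):

```go
// Open the local file to stream to the server.
file, err := os.Open("my-testfile")
if err != nil {
	fmt.Println(err)
	return
}
defer file.Close()

// PutObject reads from any io.Reader; content type is passed explicitly.
n, err := minioClient.PutObject("mybucket", "myobject", file, "application/octet-stream")
if err != nil {
	fmt.Println(err)
	return
}
fmt.Println("Uploaded", n, "bytes")
```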
@@ -477,7 +489,7 @@ if err != nil {


<a name="CopyObject"></a>
-### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
+### CopyObject(bucketName, objectName, objectSource string, conditions CopyConditions) error

Copy a source object into a new object with the provided name in the provided bucket.

@@ -487,34 +499,44 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |
-|`objectSource` | _string_ |name of the object source. |
+|`objectSource` | _string_ |Name of the source object |
-|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`].|
+|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`]|


__Example__


```go
+// Use-case-1
+// To copy an existing object to a new object with _no_ copy conditions.
+copyConditions := minio.CopyConditions{}
+err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
+if err != nil {
+	fmt.Println(err)
+	return
+}

-// All following conditions are allowed and can be combined together.
+// Use-case-2
+// To copy an existing object to a new object with the following copy conditions
+// 1. that matches a given ETag
+// 2. and modified after 1st April 2014
+// 3. but unmodified since 23rd April 2014

-// Set copy conditions.
+// Initialize empty copy conditions.
-var copyConds = minio.NewCopyConditions()
+var copyConds = minio.CopyConditions{}
-// Set modified condition, copy object modified since 2014 April.
-copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))

-// Set unmodified condition, copy object unmodified since 2014 April.
+// copy object that matches the given ETag.
-// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")

-// Set matching ETag condition, copy object which matches the following ETag.
+// and modified after 1st April 2014
-// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+copyConds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))

-// Set matching ETag except condition, copy object which does not match the following ETag.
+// but unmodified since 23rd April 2014
-// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
+copyConds.SetUnmodified(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))

-err := minioClient.CopyObject("mybucket", "myobject", "/my-sourcebucketname/my-sourceobjectname", copyConds)
+err := minioClient.CopyObject("mybucket", "myobject", "my-sourcebucketname/my-sourceobjectname", copyConds)
if err != nil {
	fmt.Println(err)
	return

@@ -523,7 +545,7 @@ if err != nil {
```

<a name="FPutObject"></a>
-### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
+### FPutObject(bucketName, objectName, filePath, contentType string) error

Uploads contents from a file to objectName.

@@ -533,16 +555,16 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |
-|`filePath` | _string_ |file path of the file to be uploaded. |
+|`filePath` | _string_ |Path to file to be uploaded |
-|`contentType` | _string_ |content type of the object. |
+|`contentType` | _string_ |Content type of the object |


__Example__


-FPutObject uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than the 5MiB in size, FPutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
+FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.

In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.

@@ -557,7 +579,7 @@ if err != nil {
```

<a name="StatObject"></a>
-### StatObject(bucketName string, objectName string) (ObjectInfo, error)
+### StatObject(bucketName, objectName string) (ObjectInfo, error)

Gets metadata of an object.

@@ -567,23 +589,23 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |


__Return Value__

|Param |Type |Description |
|:---|:---| :---|
-|`objInfo` | _ObjectInfo_ |object stat info for format listed below: |
+|`objInfo` | _ObjectInfo_ |Object stat information |


|Param |Type |Description |
|:---|:---| :---|
-|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objInfo.LastModified` | _time.Time_ |Time when object was last modified |
-|`objInfo.ETag` | _string_ |etag of the object.|
+|`objInfo.ETag` | _string_ |MD5 checksum of the object|
-|`objInfo.ContentType` | _string_ |Content-Type of the object.|
+|`objInfo.ContentType` | _string_ |Content type of the object|
-|`objInfo.Size` | _int64_ |size of the object.|
+|`objInfo.Size` | _int64_ |Size of the object|


__Example__

@@ -601,7 +623,7 @@ fmt.Println(objInfo)
```

<a name="RemoveObject"></a>
-### RemoveObject(bucketName string, objectName string) error
+### RemoveObject(bucketName, objectName string) error

Removes an object.

@@ -611,8 +633,8 @@ __Parameters__

|Param |Type |Description |
|:---|:---| :---|
-|`bucketName` | _string_ |name of the bucket. |
+|`bucketName` | _string_ |Name of the bucket |
-|`objectName` | _string_ |name of the object. |
+|`objectName` | _string_ |Name of the object |


```go

@@ -624,10 +646,41 @@ if err != nil {
}

```
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
+
+Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
+The errors observed are sent over the error channel.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectsCh` | _chan string_ | Prefix of objects to be removed |
+
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`errorCh` | _chan minio.RemoveObjectError | Channel of errors observed during deletion. |
+
+
+```go
+
+errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
+for e := range errorCh {
+	fmt.Println("Error detected during deletion: " + e.Err.Error())
+}
+
+```

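The snippet above assumes `objectsCh` already carries the object names to delete; a minimal sketch of feeding and closing that channel so RemoveObjects knows when input ends (object names are hypothetical; imports: fmt):

```go
objectsCh := make(chan string)

// Produce object names in a separate goroutine; close the channel when
// done so RemoveObjects can flush its final batch and return.
go func() {
	defer close(objectsCh)
	for _, name := range []string{"photos/1.jpg", "photos/2.jpg", "photos/3.jpg"} {
		objectsCh <- name
	}
}()

errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
for e := range errorCh {
	fmt.Println("Error detected during deletion: " + e.Err.Error())
}
```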
<a name="RemoveIncompleteUpload"></a>
|
<a name="RemoveIncompleteUpload"></a>
|
||||||
### RemoveIncompleteUpload(bucketName string, objectName string) error
|
### RemoveIncompleteUpload(bucketName, objectName string) error
|
||||||
|
|
||||||
Removes a partially uploaded object.
|
Removes a partially uploaded object.
|
||||||
|
|
||||||
|
@ -636,8 +689,8 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|`objectName` | _string_ |name of the object. |
|
|`objectName` | _string_ |Name of the object |
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
|
||||||
|
@ -656,7 +709,7 @@ if err != nil {
|
||||||
|
|
||||||
|
|
||||||
<a name="PresignedGetObject"></a>
|
<a name="PresignedGetObject"></a>
|
||||||
### PresignedGetObject(bucketName string, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
|
### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
|
||||||
|
|
||||||
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
|
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
|
||||||
|
|
||||||
|
@ -665,10 +718,10 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|`objectName` | _string_ |name of the object. |
|
|`objectName` | _string_ |Name of the object |
|
||||||
|`expiry` | _time.Duration_ |expiry in seconds. |
|
|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
|
||||||
|`reqParams` | _url.Values_ |additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
|
|`reqParams` | _url.Values_ |Additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
|
||||||
|
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
@ -690,7 +743,7 @@ if err != nil {
|
||||||
```
|
```
|
||||||
|
|
||||||
<a name="PresignedPutObject"></a>
|
<a name="PresignedPutObject"></a>
|
||||||
### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
|
### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
|
||||||
|
|
||||||
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
|
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
|
||||||
|
|
||||||
|
@ -703,9 +756,9 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|`objectName` | _string_ |name of the object. |
|
|`objectName` | _string_ |Name of the object |
|
||||||
|`expiry` | _time.Duration_ |expiry in seconds. |
|
|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
|
||||||
|
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
@ -720,7 +773,7 @@ if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
fmt.Println(presignedURL)
|
fmt.Println(presignedURL)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -779,22 +832,24 @@ fmt.Printf("%s\n", url)
|
||||||
## 5. Bucket policy/notification operations
|
## 5. Bucket policy/notification operations
|
||||||
|
|
||||||
<a name="SetBucketPolicy"></a>
|
<a name="SetBucketPolicy"></a>
|
||||||
### SetBucketPolicy(bucketname string, objectPrefix string, policy BucketPolicy) error
|
### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
|
||||||
|
|
||||||
Set access permissions on bucket or an object prefix.
|
Set access permissions on bucket or an object prefix.
|
||||||
|
|
||||||
|
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
|
||||||
|
|
||||||
__Parameters__
|
__Parameters__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket.|
|
|`bucketName` | _string_ |Name of the bucket|
|
||||||
|`objectPrefix` | _string_ |name of the object prefix.|
|
|`objectPrefix` | _string_ |Name of the object prefix|
|
||||||
|`policy` | _BucketPolicy_ |policy can be:|
|
|`policy` | _policy.BucketPolicy_ |Policy can be one of the following: |
|
||||||
|| |BucketPolicyNone|
|
|| |policy.BucketPolicyNone|
|
||||||
| | |BucketPolicyReadOnly|
|
| | |policy.BucketPolicyReadOnly|
|
||||||
|| |BucketPolicyReadWrite|
|
|| |policy.BucketPolicyReadWrite|
|
||||||
| | |BucketPolicyWriteOnly|
|
| | |policy.BucketPolicyWriteOnly|
|
||||||
|
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
@ -802,7 +857,7 @@ __Return Values__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`err` | _error_ |standard error |
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
@ -810,7 +865,7 @@ __Example__
|
||||||
|
|
||||||
```go
|
```go
|
||||||
|
|
||||||
err := minioClient.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
|
err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
|
@ -819,25 +874,27 @@ if err != nil {
|
||||||
```
|
```
|
||||||
|
|
||||||
<a name="GetBucketPolicy"></a>
|
<a name="GetBucketPolicy"></a>
|
||||||
### GetBucketPolicy(bucketName string, objectPrefix string) (BucketPolicy, error)
|
### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
|
||||||
|
|
||||||
Get access permissions on a bucket or a prefix.
|
Get access permissions on a bucket or a prefix.
|
||||||
|
|
||||||
|
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
|
||||||
|
|
||||||
__Parameters__
|
__Parameters__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|`objectPrefix` | _string_ |name of the object prefix |
|
|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketPolicy` | _BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
|
|`bucketPolicy` | _policy.BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
|
||||||
|`err` | _error_ |standard error |
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
|
||||||
|
@ -853,6 +910,43 @@ fmt.Println("Access permissions for mybucket is", bucketPolicy)
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
<a name="ListBucketPolicies"></a>
|
||||||
|
### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
|
||||||
|
|
||||||
|
Get access permissions rules associated to the specified bucket and prefix.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
|
||||||
|
|
||||||
|
|Param |Type |Description |
|
||||||
|
|:---|:---| :---|
|
||||||
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|
|`objectPrefix` | _string_ |Prefix matching objects under the bucket |
|
||||||
|
|
||||||
|
__Return Values__
|
||||||
|
|
||||||
|
|
||||||
|
|Param |Type |Description |
|
||||||
|
|:---|:---| :---|
|
||||||
|
|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions |
|
||||||
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for resource, permission := range bucketPolicies {
|
||||||
|
fmt.Println(resource, " => ", permission)
|
||||||
|
}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
<a name="GetBucketNotification"></a>
|
<a name="GetBucketNotification"></a>
|
||||||
### GetBucketNotification(bucketName string) (BucketNotification, error)
|
### GetBucketNotification(bucketName string) (BucketNotification, error)
|
||||||
|
|
||||||
|
@ -863,7 +957,7 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
|
||||||
|
@ -871,7 +965,7 @@ __Return Values__
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
|
|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
|
||||||
|`err` | _error_ |standard error |
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
|
||||||
|
@ -879,11 +973,12 @@ __Example__
|
||||||
```go
|
```go
|
||||||
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
|
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
for _, topicConfig := range bucketNotification.TopicConfigs {
|
log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err)
|
||||||
|
}
|
||||||
|
for _, topicConfig := range bucketNotification.TopicConfigs {
|
||||||
for _, e := range topicConfig.Events {
|
for _, e := range topicConfig.Events {
|
||||||
fmt.Println(e + " event is enabled")
|
fmt.Println(e + " event is enabled")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -897,15 +992,15 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|`bucketNotification` | _BucketNotification_ |bucket notification. |
|
|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service |
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`err` | _error_ |standard error |
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
|
||||||
|
@ -922,7 +1017,7 @@ bucketNotification := BucketNotification{}
|
||||||
bucketNotification.AddTopic(topicConfig)
|
bucketNotification.AddTopic(topicConfig)
|
||||||
err := c.SetBucketNotification(bucketName, bucketNotification)
|
err := c.SetBucketNotification(bucketName, bucketNotification)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Cannot set the bucket notification: " + err)
|
fmt.Println("Unable to set the bucket notification: " + err)
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -936,14 +1031,14 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ |name of the bucket. |
|
|`bucketName` | _string_ |Name of the bucket |
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`err` | _error_ |standard error |
|
|`err` | _error_ |Standard Error |
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
|
||||||
|
@ -951,12 +1046,12 @@ __Example__
|
||||||
```go
|
```go
|
||||||
err := c.RemoveAllBucketNotification(bucketName)
|
err := c.RemoveAllBucketNotification(bucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Cannot remove bucket notifications.")
|
fmt.Println("Unable to remove bucket notifications.", err)
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
<a name="ListenBucketNotification"></a>
|
<a name="ListenBucketNotification"></a>
|
||||||
### ListenBucketNotification(bucketName string, accountArn Arn, doneCh chan<- struct{}) <-chan NotificationInfo
|
### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
|
||||||
|
|
||||||
ListenBucketNotification API receives bucket notification events through the
|
ListenBucketNotification API receives bucket notification events through the
|
||||||
notification channel. The returned notification channel has two fields
|
notification channel. The returned notification channel has two fields
|
||||||
|
@ -972,19 +1067,20 @@ __Parameters__
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`bucketName` | _string_ | Bucket to listen notifications from. |
|
|`bucketName` | _string_ | Bucket to listen notifications on |
|
||||||
|`accountArn` | _Arn_ | Unique account ID to listen notifications for. |
|
|`prefix` | _string_ | Object key prefix to filter notifications for |
|
||||||
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
|
|`suffix` | _string_ | Object key suffix to filter notifications for |
|
||||||
|
|`events` | _[]string_| Enables notifications for specific event types |
|
||||||
|
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator |
|
||||||
|
|
||||||
__Return Values__
|
__Return Values__
|
||||||
|
|
||||||
|
|
||||||
|Param |Type |Description |
|
|Param |Type |Description |
|
||||||
|:---|:---| :---|
|
|:---|:---| :---|
|
||||||
|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket. |
|
|`chan NotificationInfo` | _chan_ | Read channel for all notificatons on bucket |
|
||||||
|`NotificationInfo` | _object_ | Notification object represents events info. |
|
|`NotificationInfo` | _object_ | Notification object represents events info |
|
||||||
|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
|
|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
|
||||||
|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation. |
|
|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
|
||||||
|
|
||||||
|
|
||||||
__Example__
|
__Example__
|
||||||
|
@ -998,39 +1094,82 @@ doneCh := make(chan struct{})
|
||||||
// Indicate a background go-routine to exit cleanly upon return.
|
// Indicate a background go-routine to exit cleanly upon return.
|
||||||
defer close(doneCh)
|
defer close(doneCh)
|
||||||
|
|
||||||
// Fetch the bucket location.
|
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
|
||||||
location, err := minioClient.GetBucketLocation("YOUR-BUCKET")
|
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
|
||||||
if err != nil {
|
"s3:ObjectCreated:*",
|
||||||
log.Fatalln(err)
|
"s3:ObjectRemoved:*",
|
||||||
}
|
}, doneCh) {
|
||||||
|
|
||||||
// Construct a new account Arn.
|
|
||||||
accountArn := minio.NewArn("minio", "sns", location, "your-account-id", "listen")
|
|
||||||
topicConfig := minio.NewNotificationConfig(accountArn)
|
|
||||||
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
|
|
||||||
topicConfig.AddFilterPrefix("photos/")
|
|
||||||
topicConfig.AddFilterSuffix(".jpg")
|
|
||||||
|
|
||||||
// Now, set all previously created notification configs
|
|
||||||
bucketNotification := minio.BucketNotification{}
|
|
||||||
bucketNotification.AddTopic(topicConfig)
|
|
||||||
err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalln("Error: " + err.Error())
|
|
||||||
}
|
|
||||||
log.Println("Success")
|
|
||||||
|
|
||||||
// Listen for bucket notifications on "mybucket" filtered by accountArn "arn:minio:sns:<location>:<your-account-id>:listen".
|
|
||||||
for notificationInfo := range s3Client.ListenBucketNotification("mybucket", accountArn, doneCh) {
|
|
||||||
if notificationInfo.Err != nil {
|
if notificationInfo.Err != nil {
|
||||||
fmt.Println(notificationInfo.Err)
|
log.Fatalln(notificationInfo.Err)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
fmt.Println(notificationInfo)
|
log.Println(notificationInfo)
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## 6. Explore Further
|
## 6. Client custom settings
|
||||||
|
|
||||||
|
<a name="SetAppInfo"></a>
|
||||||
|
### SetAppInfo(appName, appVersion string)
|
||||||
|
Adds application details to User-Agent.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
|
||||||
|
| Param | Type | Description |
|
||||||
|
|---|---|---|
|
||||||
|
|`appName` | _string_ | Name of the application performing the API requests. |
|
||||||
|
| `appVersion`| _string_ | Version of the application performing the API requests. |
|
||||||
|
|
||||||
|
|
||||||
|
__Example__
|
||||||
|
|
||||||
|
|
||||||
|
```go
|
||||||
|
|
||||||
|
// Set Application name and version to be used in subsequent API requests.
|
||||||
|
minioClient.SetAppInfo("myCloudApp", "1.0.0")
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
<a name="SetCustomTransport"></a>
|
||||||
|
### SetCustomTransport(customHTTPTransport http.RoundTripper)
|
||||||
|
Overrides default HTTP transport. This is usually needed for debugging
|
||||||
|
or for adding custom TLS certificates.
|
||||||
|
|
||||||
|
__Parameters__
|
||||||
|
|
||||||
|
| Param | Type | Description |
|
||||||
|
|---|---|---|
|
||||||
|
|`customHTTPTransport` | _http.RoundTripper_ | Custom transport e.g, to trace API requests and responses for debugging purposes.|
|
||||||
|
|
||||||
|
|
||||||
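The new SetCustomTransport section ships without an example; a minimal sketch of wiring in a transport that trusts a custom CA, assuming a `minioClient` created with New() and a purely hypothetical certificate path (imports: crypto/tls, crypto/x509, io/ioutil, log, net/http):

```go
// Load a custom CA bundle and build a transport that trusts it.
caCert, err := ioutil.ReadFile("/path/to/ca.pem") // hypothetical path
if err != nil {
	log.Fatalln(err)
}
caPool := x509.NewCertPool()
caPool.AppendCertsFromPEM(caCert)

tr := &http.Transport{
	TLSClientConfig: &tls.Config{RootCAs: caPool},
}

// All subsequent API requests go through this transport.
minioClient.SetCustomTransport(tr)
```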
+<a name="TraceOn"></a>
+### TraceOn(outputStream io.Writer)
+Enables HTTP tracing. The trace is written to the io.Writer
+provided. If outputStream is nil, trace is written to os.Stdout.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`outputStream` | _io.Writer_ | HTTP trace is written into outputStream.|
+
+
+<a name="TraceOff"></a>
+### TraceOff()
+Disables HTTP tracing.
+
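Neither tracing call comes with an example in this doc; a short sketch bracketing one API call with tracing, assuming a `minioClient` from the constructor section (imports: log, os):

```go
// Write wire-level request/response traces to stderr while debugging.
minioClient.TraceOn(os.Stderr)

if _, err := minioClient.ListBuckets(); err != nil {
	log.Fatalln(err)
}

// Turn tracing back off once done.
minioClient.TraceOff()
```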
+<a name="SetS3TransferAccelerate"></a>
+### SetS3TransferAccelerate(acceleratedEndpoint string)
+Set AWS S3 transfer acceleration endpoint for all API requests hereafter.
+NOTE: This API applies only to AWS S3 and ignored with other S3 compatible object storage services.
+
+__Parameters__
+
+| Param | Type | Description |
+|---|---|---|
+|`acceleratedEndpoint` | _string_ | Set to new S3 transfer acceleration endpoint.|
+
+
+## 7. Explore Further

- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)

@@ -33,7 +33,7 @@ func main() {

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
-	minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+	minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

@@ -46,30 +46,11 @@ func main() {
	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

-	// Fetch the bucket location.
+	// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
-	location, err := minioClient.GetBucketLocation("YOUR-BUCKET")
+	for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
-	if err != nil {
+		"s3:ObjectCreated:*",
-		log.Fatalln(err)
+		"s3:ObjectRemoved:*",
-	}
+	}, doneCh) {

-	// Construct a new account Arn.
-	accountArn := minio.NewArn("minio", "sns", location, "your-account-id", "listen")
-	topicConfig := minio.NewNotificationConfig(accountArn)
-	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
-	topicConfig.AddFilterPrefix("photos/")
-	topicConfig.AddFilterSuffix(".jpg")

-	// Now, set all previously created notification configs
-	bucketNotification := minio.BucketNotification{}
-	bucketNotification.AddTopic(topicConfig)
-	err = minioClient.SetBucketNotification("YOUR-BUCKET", bucketNotification)
-	if err != nil {
-		log.Fatalln("Error: " + err.Error())
-	}
-	log.Println("Success")

-	// Listen for bucket notifications on "mybucket" filtered by accountArn "arn:minio:sns:<location>:<your-account-id>:listen".
-	for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", accountArn, doneCh) {
		if notificationInfo.Err != nil {
			log.Fatalln(notificationInfo.Err)
		}

@@ -38,10 +38,14 @@ func main() {
		log.Fatalln(err)
	}

-	err = s3Client.BucketExists("my-bucketname")
+	found, err := s3Client.BucketExists("my-bucketname")
	if err != nil {
		log.Fatalln(err)
	}

-	log.Println("Success")
+	if found {
+		log.Println("Bucket found.")
+	} else {
+		log.Println("Bucket not found.")
+	}
}

@@ -45,7 +45,7 @@ func main() {
	// All following conditions are allowed and can be combined together.

	// Set copy conditions.
-	var copyConds = minio.NewCopyConditions()
+	var copyConds = minio.CopyConditions{}
	// Set modified condition, copy object modified since 2014 April.
	copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))

56 vendor/src/github.com/minio/minio-go/examples/s3/listbucketpolicies.go vendored Normal file
@@ -0,0 +1,56 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
	// dummy values, please replace them with original values.

	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
	// This boolean value is the last argument for New().

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// s3Client.TraceOn(os.Stderr)

	// Fetch the policy at 'my-objectprefix'.
	policies, err := s3Client.ListBucketPolicies("my-bucketname", "my-objectprefix")
	if err != nil {
		log.Fatalln(err)
	}

	// ListBucketPolicies returns a map of objects policy rules and their associated permissions
	// e.g. mybucket/downloadfolder/* => readonly
	//      mybucket/shared/* => readwrite

	for resource, permission := range policies {
		log.Println(resource, " => ", permission)
	}
}
56 vendor/src/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go vendored Normal file
@@ -0,0 +1,56 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
	// my-objectname are dummy values, please replace them with original values.

	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
	// This boolean value is the last argument for New().

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Enable S3 transfer accelerate endpoint.
	s3Client.S3TransferAccelerate("s3-accelerate.amazonaws.com")

	object, err := os.Open("my-testfile")
	if err != nil {
		log.Fatalln(err)
	}
	defer object.Close()

	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
61 vendor/src/github.com/minio/minio-go/examples/s3/removeobjects.go vendored Normal file
@@ -0,0 +1,61 @@
// +build ignore

/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"log"
	"strconv"

	"github.com/minio/minio-go"
)

func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
	// are dummy values, please replace them with original values.

	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
	// This boolean value is the last argument for New().

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	objectsCh := make(chan string)

	// Send object names that need to be removed to objectsCh
	go func() {
		defer close(objectsCh)
		for i := 0; i < 10; i++ {
			objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
		}
	}()

	// Call RemoveObjects API
	errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh)

	// Print errors received from RemoveObjects API
	for e := range errorCh {
		log.Fatalln("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error())
	}

	log.Println("Success")
}
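Since `RemoveObjects` streams failures back on `errorCh` and `log.Fatalln` exits on the first one, callers that want a full report can drain the channel instead; a sketch (a variant, not part of the diff):

```go
// Collect every removal failure rather than aborting on the first.
var failures []string
for e := range errorCh {
	failures = append(failures, e.ObjectName+": "+e.Err.Error())
}
if len(failures) > 0 {
	log.Println("failed to remove:", failures)
}
```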
@@ -22,6 +22,7 @@ import (
 	"log"
 
 	"github.com/minio/minio-go"
+	"github.com/minio/minio-go/pkg/policy"
 )
 
 func main() {
@@ -41,11 +42,11 @@ func main() {
 	// s3Client.TraceOn(os.Stderr)
 
 	// Description of policy input.
-	// minio.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
-	// minio.BucketPolicyReadOnly - Set read-only operations at a prefix.
-	// minio.BucketPolicyWriteOnly - Set write-only operations at a prefix.
-	// minio.BucketPolicyReadWrite - Set read-write operations at a prefix.
-	err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
+	// policy.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
+	// policy.BucketPolicyReadOnly - Set read-only operations at a prefix.
+	// policy.BucketPolicyWriteOnly - Set write-only operations at a prefix.
+	// policy.BucketPolicyReadWrite - Set read-write operations at a prefix.
+	err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", policy.BucketPolicyReadWrite)
 	if err != nil {
 		log.Fatalln(err)
 	}
@@ -34,7 +34,7 @@ const (
 	BucketPolicyWriteOnly = "writeonly"
 )
 
-// isValidBucketPolicy - Is provided policy value supported.
+// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
 func (p BucketPolicy) IsValidBucketPolicy() bool {
 	switch p {
 	case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
 	return readOnly, writeOnly
 }
 
-// Returns policy of given bucket name, prefix in given statements.
+// GetPolicy - Returns policy of given bucket name, prefix in given statements.
 func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
 	bucketResource := awsResourcePrefix + bucketName
 	objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@@ -563,8 +563,34 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
 	return policy
 }
 
-// Returns new statements containing policy of given bucket name and
-// prefix are appended.
+// GetPolicies - returns a map of policy rules for the given bucket name in the given statements.
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
+	policyRules := map[string]BucketPolicy{}
+	objResources := set.NewStringSet()
+	// Search all resources related to objects policy
+	for _, s := range statements {
+		for r := range s.Resources {
+			if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
+				objResources.Add(r)
+			}
+		}
+	}
+	// Treat each policy resource as an actual object and fetch its policy
+	for r := range objResources {
+		// Put back the trailing * if one was present
+		asterisk := ""
+		if strings.HasSuffix(r, "*") {
+			r = r[:len(r)-1]
+			asterisk = "*"
+		}
+		objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)]
+		p := GetPolicy(statements, bucketName, objectPath)
+		policyRules[bucketName+"/"+objectPath+asterisk] = p
+	}
+	return policyRules
+}
+
+// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended.
 func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
 	out := removeStatements(statements, bucketName, prefix)
 	// fmt.Println("out = ")
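GetPolicies collects every statement resource under the bucket and then reuses GetPolicy per object prefix, so the result maps each prefix pattern to its effective policy. A small sketch of that mapping; the Statement literal borrows the exported types exercised by the tests below, and "s3:GetObject" stands in for the package's read-only object action set:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio-go/pkg/set"
)

func main() {
	// A single statement granting object reads under the "download" prefix.
	statements := []policy.Statement{{
		Actions:   set.CreateStringSet("s3:GetObject"),
		Effect:    "Allow",
		Principal: policy.User{AWS: set.CreateStringSet("*")},
		Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"),
	}}
	// Expected to print: mybucket/download* => readonly
	for resource, p := range policy.GetPolicies(statements, "mybucket") {
		fmt.Println(resource, "=>", p)
	}
}
```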
@@ -19,6 +19,7 @@ package policy
 import (
 	"encoding/json"
 	"fmt"
+	"reflect"
 	"testing"
 
 	"github.com/minio/minio-go/pkg/set"
@@ -1376,6 +1377,104 @@ func TestGetObjectPolicy(t *testing.T) {
 	}
 }
 
+// GetPolicies is called and the result is validated
+func TestListBucketPolicies(t *testing.T) {
+
+	// Condition for read objects
+	downloadCondMap := make(ConditionMap)
+	downloadCondKeyMap := make(ConditionKeyMap)
+	downloadCondKeyMap.Add("s3:prefix", set.CreateStringSet("download"))
+	downloadCondMap.Add("StringEquals", downloadCondKeyMap)
+
+	// Condition for readwrite objects
+	downloadUploadCondMap := make(ConditionMap)
+	downloadUploadCondKeyMap := make(ConditionKeyMap)
+	downloadUploadCondKeyMap.Add("s3:prefix", set.CreateStringSet("both"))
+	downloadUploadCondMap.Add("StringEquals", downloadUploadCondKeyMap)
+
+	testCases := []struct {
+		statements     []Statement
+		bucketName     string
+		prefix         string
+		expectedResult map[string]BucketPolicy
+	}{
+		// Empty statements, bucket name and prefix.
+		{[]Statement{}, "", "", map[string]BucketPolicy{}},
+		// Non-empty statements, empty bucket name and empty prefix.
+		{[]Statement{{
+			Actions:   readOnlyBucketActions,
+			Effect:    "Allow",
+			Principal: User{AWS: set.CreateStringSet("*")},
+			Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+		}}, "", "", map[string]BucketPolicy{}},
+		// Empty statements, non-empty bucket name and empty prefix.
+		{[]Statement{}, "mybucket", "", map[string]BucketPolicy{}},
+		// Readonly object statement
+		{[]Statement{
+			{
+				Actions:   commonBucketActions,
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+			},
+			{
+				Actions:    readOnlyBucketActions,
+				Effect:     "Allow",
+				Principal:  User{AWS: set.CreateStringSet("*")},
+				Conditions: downloadCondMap,
+				Resources:  set.CreateStringSet("arn:aws:s3:::mybucket"),
+			},
+			{
+				Actions:   readOnlyObjectActions,
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"),
+			}}, "mybucket", "", map[string]BucketPolicy{"mybucket/download*": BucketPolicyReadOnly}},
+		// Write Only
+		{[]Statement{
+			{
+				Actions:   commonBucketActions.Union(writeOnlyBucketActions),
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+			},
+			{
+				Actions:   writeOnlyObjectActions,
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket/upload*"),
+			}}, "mybucket", "", map[string]BucketPolicy{"mybucket/upload*": BucketPolicyWriteOnly}},
+		// Readwrite
+		{[]Statement{
+			{
+				Actions:   commonBucketActions.Union(writeOnlyBucketActions),
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+			},
+			{
+				Actions:    readOnlyBucketActions,
+				Effect:     "Allow",
+				Principal:  User{AWS: set.CreateStringSet("*")},
+				Conditions: downloadUploadCondMap,
+				Resources:  set.CreateStringSet("arn:aws:s3:::mybucket"),
+			},
+			{
+				Actions:   writeOnlyObjectActions.Union(readOnlyObjectActions),
+				Effect:    "Allow",
+				Principal: User{AWS: set.CreateStringSet("*")},
+				Resources: set.CreateStringSet("arn:aws:s3:::mybucket/both*"),
+			}}, "mybucket", "", map[string]BucketPolicy{"mybucket/both*": BucketPolicyReadWrite}},
+	}
+
+	for _, testCase := range testCases {
+		policyRules := GetPolicies(testCase.statements, testCase.bucketName)
+		if !reflect.DeepEqual(testCase.expectedResult, policyRules) {
+			t.Fatalf("%+v:\n expected: %+v, got: %+v", testCase, testCase.expectedResult, policyRules)
+		}
+	}
+}
+
 // GetPolicy() is called and the result is validated.
 func TestGetPolicy(t *testing.T) {
 	helloCondMap := make(ConditionMap)
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package minio
+package s3signer
 
 import (
 	"bytes"
@@ -29,6 +29,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // Signature and API related constants.
@@ -45,22 +47,22 @@ func encodeURL2Path(u *url.URL) (path string) {
 		bucketName := hostSplits[0]
 		path = "/" + bucketName
 		path += u.Path
-		path = urlEncodePath(path)
+		path = s3utils.EncodePath(path)
 		return
 	}
 	if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
 		path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
 		path += u.Path
-		path = urlEncodePath(path)
+		path = s3utils.EncodePath(path)
 		return
 	}
-	path = urlEncodePath(u.Path)
+	path = s3utils.EncodePath(u.Path)
 	return
 }
 
-// preSignV2 - presign the request in following style.
+// PreSignV2 - presign the request in following style.
 // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
 	// Presign is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
 		return &req
@@ -95,18 +97,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
 	query.Set("Expires", strconv.FormatInt(epochExpires, 10))
 
 	// Encode query and save.
-	req.URL.RawQuery = queryEncode(query)
+	req.URL.RawQuery = s3utils.QueryEncode(query)
 
 	// Save signature finally.
-	req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+	req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
 
 	// Return.
 	return &req
 }
 
-// postPresignSignatureV2 - presigned signature for PostPolicy
+// PostPresignSignatureV2 - presigned signature for PostPolicy
 // request.
-func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
 	hm := hmac.New(sha1.New, []byte(secretAccessKey))
 	hm.Write([]byte(policyBase64))
 	signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
@@ -129,8 +131,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
 //
 // CanonicalizedProtocolHeaders = <described below>
 
-// signV2 sign the request before Do() (AWS Signature Version 2).
-func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
 	// Signature calculation is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
 		return &req
@@ -257,6 +259,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
 // have signature-related issues
 var resourceList = []string{
 	"acl",
+	"delete",
 	"location",
 	"logging",
 	"notification",
@@ -286,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
 	// Get encoded URL path.
 	if len(requestURL.Query()) > 0 {
 		// Keep the usual queries unescaped for string to sign.
-		query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
+		query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
 		path = path + "?" + query
 	}
 	buf.WriteString(path)
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package minio
+package s3signer
 
 import (
 	"sort"
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package minio
+package s3signer
 
 import (
 	"bytes"
@@ -24,6 +24,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
 )
 
 // Signature and API related constants.
@@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string {
 	return scope
 }
 
-// getCredential generate a credential string.
-func getCredential(accessKeyID, location string, t time.Time) string {
+// GetCredential generate a credential string.
+func GetCredential(accessKeyID, location string, t time.Time) string {
 	scope := getScope(location, t)
 	return accessKeyID + "/" + scope
 }
@@ -185,7 +187,7 @@ func getCanonicalRequest(req http.Request) string {
 	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
 	canonicalRequest := strings.Join([]string{
 		req.Method,
-		urlEncodePath(req.URL.Path),
+		s3utils.EncodePath(req.URL.Path),
 		req.URL.RawQuery,
 		getCanonicalHeaders(req),
 		getSignedHeaders(req),
@@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
 	return stringToSign
 }
 
-// preSignV4 presign the request, in accordance with
+// PreSignV4 presign the request, in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
 	// Presign is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
 		return &req
@@ -214,7 +216,7 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	t := time.Now().UTC()
 
 	// Get credential string.
-	credential := getCredential(accessKeyID, location, t)
+	credential := GetCredential(accessKeyID, location, t)
 
 	// Get all signed headers.
 	signedHeaders := getSignedHeaders(req)
@@ -246,9 +248,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
 	return &req
 }
 
-// postPresignSignatureV4 - presigned signature for PostPolicy
+// PostPresignSignatureV4 - presigned signature for PostPolicy
 // requests.
-func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
 	// Get signing key.
 	signingkey := getSigningKey(secretAccessKey, location, t)
 	// Calculate signature.
@@ -256,9 +258,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
 	return signature
 }
 
-// signV4 sign the request before Do(), in accordance with
+// SignV4 sign the request before Do(), in accordance with
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
 	// Signature calculation is not needed for anonymous credentials.
 	if accessKeyID == "" || secretAccessKey == "" {
 		return &req
@@ -280,7 +282,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
 	signingKey := getSigningKey(secretAccessKey, location, t)
 
 	// Get credential string.
-	credential := getCredential(accessKeyID, location, t)
+	credential := GetCredential(accessKeyID, location, t)
 
 	// Get all signed headers.
 	signedHeaders := getSignedHeaders(req)
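With the signer split out into pkg/s3signer, call sites outside the package now go through the exported names; a hedged sketch mirroring the new test file below:

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/my-bucketname/my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}
	// SignV4 takes the request by value and returns a signed copy;
	// with non-anonymous credentials the Authorization header is set.
	signed := s3signer.SignV4(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", "us-east-1")
	log.Println(signed.Header.Get("Authorization"))
}
```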
70 vendor/src/github.com/minio/minio-go/pkg/s3signer/request-signature_test.go vendored Normal file
@@ -0,0 +1,70 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
	"net/http"
	"strings"
	"testing"
)

// Tests signature calculation.
func TestSignatureCalculation(t *testing.T) {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
	if err != nil {
		t.Fatal("Error:", err)
	}
	req = SignV4(*req, "", "", "us-east-1")
	if req.Header.Get("Authorization") != "" {
		t.Fatal("Error: anonymous credentials should not have Authorization header.")
	}

	req = PreSignV4(*req, "", "", "us-east-1", 0)
	if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
	}

	req = SignV2(*req, "", "")
	if req.Header.Get("Authorization") != "" {
		t.Fatal("Error: anonymous credentials should not have Authorization header.")
	}

	req = PreSignV2(*req, "", "", 0)
	if strings.Contains(req.URL.RawQuery, "Signature") {
		t.Fatal("Error: anonymous credentials should not have Signature query resource.")
	}

	req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
	if req.Header.Get("Authorization") == "" {
		t.Fatal("Error: normal credentials should have Authorization header.")
	}

	req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
	if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
		t.Fatal("Error: normal credentials should have Signature query resource.")
	}

	req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
	if req.Header.Get("Authorization") == "" {
		t.Fatal("Error: normal credentials should have Authorization header.")
	}

	req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
	if !strings.Contains(req.URL.RawQuery, "Signature") {
		t.Fatal("Error: normal credentials should have Signature query resource.")
	}
}
39 vendor/src/github.com/minio/minio-go/pkg/s3signer/utils.go vendored Normal file
@@ -0,0 +1,39 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
	"crypto/hmac"
	"crypto/sha256"
)

// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// the request payload is not signed.
const unsignedPayload = "UNSIGNED-PAYLOAD"

// sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte {
	hash := sha256.New()
	hash.Write(data)
	return hash.Sum(nil)
}

// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
	hash := hmac.New(sha256.New, key)
	hash.Write(data)
	return hash.Sum(nil)
}
66 vendor/src/github.com/minio/minio-go/pkg/s3signer/utils_test.go vendored Normal file
@@ -0,0 +1,66 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3signer

import (
	"fmt"
	"net/url"
	"testing"
)

// Tests url encoding.
func TestEncodeURL2Path(t *testing.T) {
	type urlStrings struct {
		objName        string
		encodedObjName string
	}

	bucketName := "bucketName"
	want := []urlStrings{
		{
			objName:        "本語",
			encodedObjName: "%E6%9C%AC%E8%AA%9E",
		},
		{
			objName:        "本語.1",
			encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
		},
		{
			objName:        ">123>3123123",
			encodedObjName: "%3E123%3E3123123",
		},
		{
			objName:        "test 1 2.txt",
			encodedObjName: "test%201%202.txt",
		},
		{
			objName:        "test++ 1.txt",
			encodedObjName: "test%2B%2B%201.txt",
		},
	}

	for _, o := range want {
		u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
		if err != nil {
			t.Fatal("Error:", err)
		}
		urlPath := "/" + bucketName + "/" + o.encodedObjName
		if urlPath != encodeURL2Path(u) {
			t.Fatal("Error")
		}
	}
}
183 vendor/src/github.com/minio/minio-go/pkg/s3utils/utils.go vendored Normal file
@@ -0,0 +1,183 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3utils

import (
	"bytes"
	"encoding/hex"
	"net"
	"net/url"
	"regexp"
	"sort"
	"strings"
	"unicode/utf8"
)

// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}

// IsValidDomain validates if input string is a valid domain name.
func IsValidDomain(host string) bool {
	// See RFC 1035, RFC 3696.
	host = strings.TrimSpace(host)
	if len(host) == 0 || len(host) > 255 {
		return false
	}
	// host cannot start or end with "-"
	if host[len(host)-1:] == "-" || host[:1] == "-" {
		return false
	}
	// host cannot start or end with "_"
	if host[len(host)-1:] == "_" || host[:1] == "_" {
		return false
	}
	// host cannot start or end with a "."
	if host[len(host)-1:] == "." || host[:1] == "." {
		return false
	}
	// All non alphanumeric characters are invalid.
	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
		return false
	}
	// No need to regexp match, since the list is non-exhaustive.
	// We let it be valid and fail later.
	return true
}

// IsValidIP parses input string for ip address validity.
func IsValidIP(ip string) bool {
	return net.ParseIP(ip) != nil
}

// IsVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3 and Google Cloud Storage
// would support this.
func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
	if endpointURL == sentinelURL {
		return false
	}
	// bucketName can be valid but '.' in the hostname will fail SSL
	// certificate validation. So do not use host-style for such buckets.
	if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
		return false
	}
	// Return true for all other cases
	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}

// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
	if IsAmazonChinaEndpoint(endpointURL) {
		return true
	}

	return endpointURL.Host == "s3.amazonaws.com"
}

// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
// the China (Beijing) Region. Customers with existing AWS credentials
// will not be able to access resources in the new Region, and vice versa.
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
	if endpointURL == sentinelURL {
		return false
	}
	return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}

// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
	if endpointURL == sentinelURL {
		return false
	}
	return endpointURL.Host == "storage.googleapis.com"
}

// Expects ascii encoded strings - from output of urlEncodePath
func percentEncodeSlash(s string) string {
	return strings.Replace(s, "/", "%2F", -1)
}

// QueryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by urlEncodePath() used
// here, it also percent encodes '/' (forward slash)
func QueryEncode(v url.Values) string {
	if v == nil {
		return ""
	}
	var buf bytes.Buffer
	keys := make([]string, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		vs := v[k]
		prefix := percentEncodeSlash(EncodePath(k)) + "="
		for _, v := range vs {
			if buf.Len() > 0 {
				buf.WriteByte('&')
			}
			buf.WriteString(prefix)
			buf.WriteString(percentEncodeSlash(EncodePath(v)))
		}
	}
	return buf.String()
}

// if object matches reserved string, no need to encode them
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")

// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8;
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func EncodePath(pathName string) string {
	if reservedObjectNames.MatchString(pathName) {
		return pathName
	}
	var encodedPathname string
	for _, s := range pathName {
		if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
			encodedPathname = encodedPathname + string(s)
			continue
		}
		switch s {
		case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
			encodedPathname = encodedPathname + string(s)
			continue
		default:
			len := utf8.RuneLen(s)
			if len < 0 {
				// if utf8 cannot convert return the same string as is
				return pathName
			}
			u := make([]byte, len)
			utf8.EncodeRune(u, s)
			for _, r := range u {
				hex := hex.EncodeToString([]byte{r})
				encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
			}
		}
	}
	return encodedPathname
}
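EncodePath and QueryEncode exist because the standard encoders do not produce the exact escaping S3 signs against: url.QueryEscape renders a space as '+', while the signature needs %20, and query values must additionally have '/' percent-encoded. A sketch whose expected outputs match the test file below:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	// EncodePath percent-encodes spaces (url.QueryEscape would emit '+').
	fmt.Println(s3utils.EncodePath("space in url")) // space%20in%20url
	fmt.Println(url.QueryEscape("space in url"))    // space+in+url

	// QueryEncode also percent-encodes '/' inside values.
	v := url.Values{}
	v.Add("@prefix", "a/b/c/")
	fmt.Println(s3utils.QueryEncode(v)) // %40prefix=a%2Fb%2Fc%2F
}
```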
284 vendor/src/github.com/minio/minio-go/pkg/s3utils/utils_test.go vendored Normal file
@@ -0,0 +1,284 @@
/*
 * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s3utils

import (
	"net/url"
	"testing"
)

// Tests for 'isValidDomain(host string) bool'.
func TestIsValidDomain(t *testing.T) {
	testCases := []struct {
		// Input.
		host string
		// Expected result.
		result bool
	}{
		{"s3.amazonaws.com", true},
		{"s3.cn-north-1.amazonaws.com.cn", true},
		{"s3.amazonaws.com_", false},
		{"%$$$", false},
		{"s3.amz.test.com", true},
		{"s3.%%", false},
		{"localhost", true},
		{"-localhost", false},
		{"", false},
		{"\n \t", false},
		{" ", false},
	}

	for i, testCase := range testCases {
		result := IsValidDomain(testCase.host)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
		}
	}
}

// Tests validate IP address validator.
func TestIsValidIP(t *testing.T) {
	testCases := []struct {
		// Input.
		ip string
		// Expected result.
		result bool
	}{
		{"192.168.1.1", true},
		{"192.168.1", false},
		{"192.168.1.1.1", false},
		{"-192.168.1.1", false},
		{"260.192.1.1", false},
	}

	for i, testCase := range testCases {
		result := IsValidIP(testCase.ip)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
		}
	}

}

// Tests validate virtual host validator.
func TestIsVirtualHostSupported(t *testing.T) {
	testCases := []struct {
		url    string
		bucket string
		// Expected result.
		result bool
	}{
		{"https://s3.amazonaws.com", "my-bucket", true},
		{"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
		{"https://s3.amazonaws.com", "my-bucket.", false},
		{"https://amazons3.amazonaws.com", "my-bucket.", false},
		{"https://storage.googleapis.com/", "my-bucket", true},
		{"https://mystorage.googleapis.com/", "my-bucket", false},
	}

	for i, testCase := range testCases {
		u, err := url.Parse(testCase.url)
		if err != nil {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
		}
		result := IsVirtualHostSupported(*u, testCase.bucket)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
		}
	}
}

// Tests validate Amazon endpoint validator.
func TestIsAmazonEndpoint(t *testing.T) {
	testCases := []struct {
		url string
		// Expected result.
		result bool
	}{
		{"https://192.168.1.1", false},
		{"192.168.1.1", false},
		{"http://storage.googleapis.com", false},
		{"https://storage.googleapis.com", false},
		{"storage.googleapis.com", false},
		{"s3.amazonaws.com", false},
		{"https://amazons3.amazonaws.com", false},
		{"-192.168.1.1", false},
		{"260.192.1.1", false},
		// valid inputs.
		{"https://s3.amazonaws.com", true},
		{"https://s3.cn-north-1.amazonaws.com.cn", true},
	}

	for i, testCase := range testCases {
		u, err := url.Parse(testCase.url)
		if err != nil {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
		}
		result := IsAmazonEndpoint(*u)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
		}
	}

}

// Tests validate Amazon S3 China endpoint validator.
func TestIsAmazonChinaEndpoint(t *testing.T) {
	testCases := []struct {
		url string
		// Expected result.
		result bool
	}{
		{"https://192.168.1.1", false},
		{"192.168.1.1", false},
		{"http://storage.googleapis.com", false},
		{"https://storage.googleapis.com", false},
		{"storage.googleapis.com", false},
		{"s3.amazonaws.com", false},
		{"https://amazons3.amazonaws.com", false},
		{"-192.168.1.1", false},
		{"260.192.1.1", false},
		// s3.amazonaws.com is not a valid Amazon S3 China end point.
		{"https://s3.amazonaws.com", false},
		// valid input.
		{"https://s3.cn-north-1.amazonaws.com.cn", true},
	}

	for i, testCase := range testCases {
		u, err := url.Parse(testCase.url)
		if err != nil {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
		}
		result := IsAmazonChinaEndpoint(*u)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isAmazonChinaEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
		}
	}

}

// Tests validate Google Cloud end point validator.
func TestIsGoogleEndpoint(t *testing.T) {
	testCases := []struct {
		url string
		// Expected result.
		result bool
	}{
		{"192.168.1.1", false},
		{"https://192.168.1.1", false},
		{"s3.amazonaws.com", false},
		{"http://s3.amazonaws.com", false},
		{"https://s3.amazonaws.com", false},
		{"https://s3.cn-north-1.amazonaws.com.cn", false},
		{"-192.168.1.1", false},
		{"260.192.1.1", false},
		// valid inputs.
		{"http://storage.googleapis.com", true},
		{"https://storage.googleapis.com", true},
	}

	for i, testCase := range testCases {
		u, err := url.Parse(testCase.url)
		if err != nil {
			t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
		}
		result := IsGoogleEndpoint(*u)
		if testCase.result != result {
			t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
		}
	}

}

func TestPercentEncodeSlash(t *testing.T) {
	testCases := []struct {
		input  string
		output string
	}{
		{"test123", "test123"},
		{"abc,+_1", "abc,+_1"},
		{"%40prefix=test%40123", "%40prefix=test%40123"},
		{"key1=val1/val2", "key1=val1%2Fval2"},
		{"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
	}

	for i, testCase := range testCases {
		receivedOutput := percentEncodeSlash(testCase.input)
		if testCase.output != receivedOutput {
			t.Errorf(
				"Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
				i+1, testCase.input, testCase.output, receivedOutput,
			)
		}
	}
}

// Tests validate the query encoder.
func TestQueryEncode(t *testing.T) {
	testCases := []struct {
		queryKey      string
		valueToEncode []string
		// Expected result.
		result string
	}{
		{"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
		{"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
		{"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
		{"prefix", []string{"test#123"}, "prefix=test%23123"},
		{"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
		{"prefix", []string{"test123"}, "prefix=test123"},
		{"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
	}

	for i, testCase := range testCases {
		urlValues := make(url.Values)
		for _, valueToEncode := range testCase.valueToEncode {
			urlValues.Add(testCase.queryKey, valueToEncode)
		}
		result := QueryEncode(urlValues)
		if testCase.result != result {
			t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
		}
	}
}

// Tests validate the URL path encoder.
func TestEncodePath(t *testing.T) {
	testCases := []struct {
		// Input.
		inputStr string
		// Expected result.
		result string
	}{
		{"thisisthe%url", "thisisthe%25url"},
		{"本語", "%E6%9C%AC%E8%AA%9E"},
		{"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
		{">123", "%3E123"},
		{"myurl#link", "myurl%23link"},
		{"space in url", "space%20in%20url"},
		{"url+path", "url%2Bpath"},
	}

	for i, testCase := range testCases {
		result := EncodePath(testCase.inputStr)
		if testCase.result != result {
			t.Errorf("Test %d: Expected encodePath result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
		}
	}
}
@@ -25,8 +25,8 @@ import (
 // StringSet - uses map as set of strings.
 type StringSet map[string]struct{}
 
-// keys - returns StringSet keys.
-func (set StringSet) keys() []string {
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
 	keys := make([]string, 0, len(set))
 	for k := range set {
 		keys = append(keys, k)
@@ -141,7 +141,7 @@ func (set StringSet) Union(sset StringSet) StringSet {
 
 // MarshalJSON - converts to JSON data.
 func (set StringSet) MarshalJSON() ([]byte, error) {
-	return json.Marshal(set.keys())
+	return json.Marshal(set.ToSlice())
 }
 
 // UnmarshalJSON - parses JSON data and creates new set with it.
@@ -169,7 +169,7 @@ func (set *StringSet) UnmarshalJSON(data []byte) error {
 
 // String - returns printable string of the set.
 func (set StringSet) String() string {
-	return fmt.Sprintf("%s", set.ToSlice())
+	return fmt.Sprintf("%s", set.ToSlice())
 }
 
 // NewStringSet - creates new string set.
@@ -17,6 +17,7 @@
 package set
 
 import (
+    "fmt"
     "strings"
     "testing"
 )
@@ -320,3 +321,27 @@ func TestStringSetString(t *testing.T) {
         }
     }
 }
+
+// StringSet.ToSlice() is called with series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetToSlice(t *testing.T) {
+    testCases := []struct {
+        set            StringSet
+        expectedResult string
+    }{
+        // Test empty set.
+        {NewStringSet(), `[]`},
+        // Test set with empty value.
+        {CreateStringSet(""), `[]`},
+        // Test set with value.
+        {CreateStringSet("foo"), `[foo]`},
+        // Test set with value.
+        {CreateStringSet("foo", "bar"), `[bar foo]`},
+    }
+
+    for _, testCase := range testCases {
+        sslice := testCase.set.ToSlice()
+        if str := fmt.Sprintf("%s", sslice); str != testCase.expectedResult {
+            t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+        }
+    }
+}
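The rename from the unexported keys() to the exported ToSlice() makes the member list available to callers outside pkg/set; the `[bar foo]` expectation in the test above also implies the returned slice is sorted. A minimal caller sketch (the region values are made up):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	regions := set.CreateStringSet("us-west-2", "us-east-1")
	// ToSlice returns the members as a sorted []string, so output is stable.
	fmt.Println(regions.ToSlice()) // [us-east-1 us-west-2]
}
```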
@@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
     return nil
 }
 
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+    if strings.TrimSpace(status) == "" || status == "" {
+        return ErrInvalidArgument("Status is empty")
+    }
+    policyCond := policyCondition{
+        matchType: "eq",
+        condition: "$success_action_status",
+        value:     status,
+    }
+    if err := p.addNewPolicy(policyCond); err != nil {
+        return err
+    }
+    p.formData["success_action_status"] = status
+    return nil
+}
+
 // addNewPolicy - internal helper to validate adding new policies.
 func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
     if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
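The new condition maps to the S3 POST policy field `success_action_status`: by default S3 answers a successful browser POST with 204 No Content, and this setter lets the policy request 200 or 201 instead. A minimal sketch, assuming a policy built with the package's existing setters (bucket, object, and expiry values are illustrative):

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	policy.SetBucket("my-bucket")
	policy.SetKey("my-object")
	policy.SetExpires(time.Now().UTC().Add(24 * time.Hour))

	// Ask S3 to reply to the upload POST with "201 Created" and an XML body.
	if err := policy.SetSuccessStatusAction("201"); err != nil {
		log.Fatalln(err)
	}
}
```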
52 vendor/src/github.com/minio/minio-go/retry-continous.go vendored Normal file
@@ -0,0 +1,52 @@
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+    attemptCh := make(chan int)
+
+    // normalize jitter to the range [0, 1.0]
+    if jitter < NoJitter {
+        jitter = NoJitter
+    }
+    if jitter > MaxJitter {
+        jitter = MaxJitter
+    }
+
+    // computes the exponential backoff duration according to
+    // https://www.awsarchitectureblog.com/2015/03/backoff.html
+    exponentialBackoffWait := func(attempt int) time.Duration {
+        // 1<<uint(attempt) below could overflow, so limit the value of attempt
+        maxAttempt := 30
+        if attempt > maxAttempt {
+            attempt = maxAttempt
+        }
+        //sleep = random_between(0, min(cap, base * 2 ** attempt))
+        sleep := unit * time.Duration(1<<uint(attempt))
+        if sleep > cap {
+            sleep = cap
+        }
+        if jitter != NoJitter {
+            sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+        }
+        return sleep
+    }
+
+    go func() {
+        defer close(attemptCh)
+        var nextBackoff int
+        for {
+            select {
+            // Attempts starts.
+            case attemptCh <- nextBackoff:
+                nextBackoff++
+            case <-doneCh:
+                // Stop the routine.
+                return
+            }
+            time.Sleep(exponentialBackoffWait(nextBackoff))
+        }
+    }()
+    return attemptCh
+}
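Each wait is min(cap, unit * 2^attempt) minus a random jitter fraction, so with unit = 1s and cap = 30s the successive ceilings are 1s, 2s, 4s, 8s, 16s, 30s, 30s, and so on indefinitely. A hypothetical consumer sketch inside the same package (tryOperation and the unit/cap values are illustrative; Client, MaxJitter, and newRetryTimerContinous are the names from the file above):

```go
package minio

import (
	"log"
	"time"
)

// doWithRetry is a hypothetical wrapper, not part of the library: it keeps
// calling tryOperation until it succeeds, pacing attempts with the timer.
func (c Client) doWithRetry(tryOperation func() error) error {
	doneCh := make(chan struct{})
	defer close(doneCh) // closing doneCh stops the timer goroutine

	var err error
	for attempt := range c.newRetryTimerContinous(time.Second, 30*time.Second, MaxJitter, doneCh) {
		if err = tryOperation(); err == nil {
			return nil
		}
		log.Printf("attempt %d failed: %v; retrying", attempt, err)
	}
	return err
}
```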
@@ -20,9 +20,12 @@ package minio
 // "cn-north-1" adds support for AWS China.
 var awsS3EndpointMap = map[string]string{
     "us-east-1":      "s3.amazonaws.com",
+    "us-east-2":      "s3-us-east-2.amazonaws.com",
     "us-west-2":      "s3-us-west-2.amazonaws.com",
     "us-west-1":      "s3-us-west-1.amazonaws.com",
+    "ca-central-1":   "s3.ca-central-1.amazonaws.com",
     "eu-west-1":      "s3-eu-west-1.amazonaws.com",
+    "eu-west-2":      "s3-eu-west-2.amazonaws.com",
     "eu-central-1":   "s3-eu-central-1.amazonaws.com",
     "ap-south-1":     "s3-ap-south-1.amazonaws.com",
     "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
@@ -21,6 +21,7 @@ import (
     "encoding/xml"
     "io/ioutil"
     "net/http"
+    "strconv"
 )
 
 // Contains common used utilities for tests.
@@ -62,3 +63,12 @@ func encodeResponse(response interface{}) []byte {
     encode.Encode(response)
     return bytesBuffer.Bytes()
 }
+
+// Convert string to bool and always return true if any error
+func mustParseBool(str string) bool {
+    b, err := strconv.ParseBool(str)
+    if err != nil {
+        return true
+    }
+    return b
+}
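Note the failure direction: an empty or malformed string yields true, not false, so the helper defaults to the enabled state. A hypothetical sketch of that behavior in test setup (the environment variable name is made up):

```go
package minio

import "os"

// secureFromEnv is a hypothetical helper: with ENABLE_HTTPS unset or set
// to garbage, mustParseBool returns true, so tests default to HTTPS.
func secureFromEnv() bool {
	return mustParseBool(os.Getenv("ENABLE_HTTPS"))
}
```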
219 vendor/src/github.com/minio/minio-go/utils.go vendored
@@ -17,11 +17,8 @@
 package minio
 
 import (
-    "bytes"
-    "crypto/hmac"
     "crypto/md5"
     "crypto/sha256"
-    "encoding/hex"
     "encoding/xml"
     "io"
     "io/ioutil"
@@ -29,10 +26,11 @@ import (
     "net/http"
     "net/url"
     "regexp"
-    "sort"
     "strings"
     "time"
     "unicode/utf8"
+
+    "github.com/minio/minio-go/pkg/s3utils"
 )
 
 // xmlDecoder provide decoded value in xml.
@@ -55,13 +53,6 @@ func sumMD5(data []byte) []byte {
     return hash.Sum(nil)
 }
 
-// sumHMAC calculate hmac between two input byte array.
-func sumHMAC(key []byte, data []byte) []byte {
-    hash := hmac.New(sha256.New, key)
-    hash.Write(data)
-    return hash.Sum(nil)
-}
-
 // getEndpointURL - construct a new endpoint.
 func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
     if strings.Contains(endpoint, ":") {
@@ -69,12 +60,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
         if err != nil {
             return nil, err
         }
-        if !isValidIP(host) && !isValidDomain(host) {
+        if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
             msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
             return nil, ErrInvalidArgument(msg)
         }
     } else {
-        if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+        if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
             msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
             return nil, ErrInvalidArgument(msg)
         }
@@ -93,45 +84,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
     }
 
     // Validate incoming endpoint URL.
-    if err := isValidEndpointURL(endpointURL.String()); err != nil {
+    if err := isValidEndpointURL(*endpointURL); err != nil {
         return nil, err
     }
     return endpointURL, nil
 }
 
-// isValidDomain validates if input string is a valid domain name.
-func isValidDomain(host string) bool {
-    // See RFC 1035, RFC 3696.
-    host = strings.TrimSpace(host)
-    if len(host) == 0 || len(host) > 255 {
-        return false
-    }
-    // host cannot start or end with "-"
-    if host[len(host)-1:] == "-" || host[:1] == "-" {
-        return false
-    }
-    // host cannot start or end with "_"
-    if host[len(host)-1:] == "_" || host[:1] == "_" {
-        return false
-    }
-    // host cannot start or end with a "."
-    if host[len(host)-1:] == "." || host[:1] == "." {
-        return false
-    }
-    // All non alphanumeric characters are invalid.
-    if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
-        return false
-    }
-    // No need to regexp match, since the list is non-exhaustive.
-    // We let it valid and fail later.
-    return true
-}
-
-// isValidIP parses input string for ip address validity.
-func isValidIP(ip string) bool {
-    return net.ParseIP(ip) != nil
-}
-
 // closeResponse close non nil response with any response Body.
 // convenient wrapper to drain any remaining data on response body.
 //
@@ -152,92 +110,24 @@ func closeResponse(resp *http.Response) {
     }
 }
 
-// isVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage
-// would support this.
-func isVirtualHostSupported(endpointURL string, bucketName string) bool {
-    url, err := url.Parse(endpointURL)
-    if err != nil {
-        return false
-    }
-    // bucketName can be valid but '.' in the hostname will fail SSL
-    // certificate validation. So do not use host-style for such buckets.
-    if url.Scheme == "https" && strings.Contains(bucketName, ".") {
-        return false
-    }
-    // Return true for all other cases
-    return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
-}
-
-// Match if it is exactly Amazon S3 endpoint.
-func isAmazonEndpoint(endpointURL string) bool {
-    if isAmazonChinaEndpoint(endpointURL) {
-        return true
-    }
-    url, err := url.Parse(endpointURL)
-    if err != nil {
-        return false
-    }
-    if url.Host == "s3.amazonaws.com" {
-        return true
-    }
-    return false
-}
-
-// Match if it is exactly Amazon S3 China endpoint.
-// Customers who wish to use the new Beijing Region are required
-// to sign up for a separate set of account credentials unique to
-// the China (Beijing) Region. Customers with existing AWS credentials
-// will not be able to access resources in the new Region, and vice versa.
-// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
-func isAmazonChinaEndpoint(endpointURL string) bool {
-    if endpointURL == "" {
-        return false
-    }
-    url, err := url.Parse(endpointURL)
-    if err != nil {
-        return false
-    }
-    if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
-        return true
-    }
-    return false
-}
-
-// Match if it is exactly Google cloud storage endpoint.
-func isGoogleEndpoint(endpointURL string) bool {
-    if endpointURL == "" {
-        return false
-    }
-    url, err := url.Parse(endpointURL)
-    if err != nil {
-        return false
-    }
-    if url.Host == "storage.googleapis.com" {
-        return true
-    }
-    return false
-}
-
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
 // Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL string) error {
-    if endpointURL == "" {
+func isValidEndpointURL(endpointURL url.URL) error {
+    if endpointURL == sentinelURL {
         return ErrInvalidArgument("Endpoint url cannot be empty.")
     }
-    url, err := url.Parse(endpointURL)
-    if err != nil {
+    if endpointURL.Path != "/" && endpointURL.Path != "" {
         return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
     }
-    if url.Path != "/" && url.Path != "" {
-        return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
-    }
-    if strings.Contains(endpointURL, ".amazonaws.com") {
-        if !isAmazonEndpoint(endpointURL) {
+    if strings.Contains(endpointURL.Host, ".amazonaws.com") {
+        if !s3utils.IsAmazonEndpoint(endpointURL) {
             return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
         }
     }
-    if strings.Contains(endpointURL, ".googleapis.com") {
-        if !isGoogleEndpoint(endpointURL) {
+    if strings.Contains(endpointURL.Host, ".googleapis.com") {
+        if !s3utils.IsGoogleEndpoint(endpointURL) {
             return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
         }
     }
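Passing url.URL by value lets the "empty endpoint" check become a plain struct comparison against the zero value instead of a string test. A minimal standalone sketch of that idea (the parsed endpoint is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	sentinelURL := url.URL{} // zero value, as in the hunk above

	u, err := url.Parse("https://s3.amazonaws.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(*u == sentinelURL)        // false: a parsed endpoint is non-zero
	fmt.Println(url.URL{} == sentinelURL) // true: emptiness via struct equality
}
```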
@@ -260,6 +150,9 @@ func isValidExpiry(expires time.Duration) error {
 // style requests instead for such buckets.
 var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
 
+// Invalid bucket name with double dot.
+var invalidDotBucketName = regexp.MustCompile(`\.\.`)
+
 // isValidBucketName - verify bucket name in accordance with
 // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
 func isValidBucketName(bucketName string) error {
@@ -275,7 +168,7 @@ func isValidBucketName(bucketName string) error {
     if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
         return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
     }
-    if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
+    if invalidDotBucketName.MatchString(bucketName) {
         return ErrInvalidBucketName("Bucket name cannot have successive periods.")
     }
     if !validBucketName.MatchString(bucketName) {
@@ -310,67 +203,25 @@ func isValidObjectPrefix(objectPrefix string) error {
     return nil
 }
 
-// queryEncode - encodes query values in their URL encoded form.
-func queryEncode(v url.Values) string {
-    if v == nil {
-        return ""
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+    h2 := make(http.Header, len(h))
+    for k, vv := range h {
+        vv2 := make([]string, len(vv))
+        copy(vv2, vv)
+        h2[k] = vv2
     }
-    var buf bytes.Buffer
-    keys := make([]string, 0, len(v))
-    for k := range v {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-    for _, k := range keys {
-        vs := v[k]
-        prefix := urlEncodePath(k) + "="
-        for _, v := range vs {
-            if buf.Len() > 0 {
-                buf.WriteByte('&')
-            }
-            buf.WriteString(prefix)
-            buf.WriteString(urlEncodePath(v))
-        }
-    }
-    return buf.String()
+    return h2
 }
 
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
-//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
-    // if object matches reserved string, no need to encode them
-    reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
-    if reservedNames.MatchString(pathName) {
-        return pathName
-    }
-    var encodedPathname string
-    for _, s := range pathName {
-        if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
-            encodedPathname = encodedPathname + string(s)
-            continue
-        }
-        switch s {
-        case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
-            encodedPathname = encodedPathname + string(s)
-            continue
-        default:
-            len := utf8.RuneLen(s)
-            if len < 0 {
-                // if utf8 cannot convert return the same string as is
-                return pathName
-            }
-            u := make([]byte, len)
-            utf8.EncodeRune(u, s)
-            for _, r := range u {
-                hex := hex.EncodeToString([]byte{r})
-                encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
-            }
-        }
-    }
-    return encodedPathname
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+    filteredHeader = cloneHeader(header)
+    for _, key := range filterKeys {
+        filteredHeader.Del(key)
+    }
+    return filteredHeader
 }
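filterHeader clones before deleting, so the caller's original header survives. A small usage sketch (assumes cloneHeader and filterHeader from the hunk above are in scope; the header values are made up):

```go
package minio

import (
	"fmt"
	"net/http"
)

// demoFilterHeader is a hypothetical demonstration, not library code.
func demoFilterHeader() {
	h := http.Header{}
	h.Set("Content-Type", "binary/octet-stream")
	h.Set("Content-Encoding", "gzip")

	filtered := filterHeader(h, []string{"Content-Encoding"})
	fmt.Println(filtered.Get("Content-Type"))     // "binary/octet-stream"
	fmt.Println(filtered.Get("Content-Encoding")) // "" (removed from the copy)
	fmt.Println(h.Get("Content-Encoding"))        // "gzip" (original untouched)
}
```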
254 vendor/src/github.com/minio/minio-go/utils_test.go vendored
@@ -17,11 +17,27 @@ package minio
 
 import (
     "fmt"
+    "net/http"
     "net/url"
     "testing"
     "time"
 )
 
+// Tests filter header function by filtering out
+// some custom header keys.
+func TestFilterHeader(t *testing.T) {
+    header := http.Header{}
+    header.Set("Content-Type", "binary/octet-stream")
+    header.Set("Content-Encoding", "gzip")
+    newHeader := filterHeader(header, []string{"Content-Type"})
+    if len(newHeader) > 1 {
+        t.Fatalf("Unexpected size of the returned header, should be 1, got %d", len(newHeader))
+    }
+    if newHeader.Get("Content-Encoding") != "gzip" {
+        t.Fatalf("Unexpected content-encoding value, expected 'gzip', got %s", newHeader.Get("Content-Encoding"))
+    }
+}
+
 // Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
 func TestGetEndpointURL(t *testing.T) {
     testCases := []struct {
@@ -74,35 +90,6 @@ func TestGetEndpointURL(t *testing.T) {
     }
 }
 
-// Tests for 'isValidDomain(host string) bool'.
-func TestIsValidDomain(t *testing.T) {
-    testCases := []struct {
-        // Input.
-        host string
-        // Expected result.
-        result bool
-    }{
-        {"s3.amazonaws.com", true},
-        {"s3.cn-north-1.amazonaws.com.cn", true},
-        {"s3.amazonaws.com_", false},
-        {"%$$$", false},
-        {"s3.amz.test.com", true},
-        {"s3.%%", false},
-        {"localhost", true},
-        {"-localhost", false},
-        {"", false},
-        {"\n \t", false},
-        {" ", false},
-    }
-
-    for i, testCase := range testCases {
-        result := isValidDomain(testCase.host)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
-        }
-    }
-}
-
 // Tests validate end point validator.
 func TestIsValidEndpointURL(t *testing.T) {
     testCases := []struct {
@@ -125,161 +112,33 @@ func TestIsValidEndpointURL(t *testing.T) {
     }
 
     for i, testCase := range testCases {
-        err := isValidEndpointURL(testCase.url)
+        var u url.URL
+        if testCase.url == "" {
+            u = sentinelURL
+        } else {
+            u1, err := url.Parse(testCase.url)
+            if err != nil {
+                t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
+            }
+            u = *u1
+        }
+        err := isValidEndpointURL(u)
         if err != nil && testCase.shouldPass {
-            t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
+            t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
         }
         if err == nil && !testCase.shouldPass {
-            t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
+            t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err)
         }
         // Failed as expected, but does it fail for the expected reason.
         if err != nil && !testCase.shouldPass {
             if err.Error() != testCase.err.Error() {
-                t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
+                t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err, err)
             }
         }
 
     }
 }
 
-// Tests validate IP address validator.
-func TestIsValidIP(t *testing.T) {
-    testCases := []struct {
-        // Input.
-        ip string
-        // Expected result.
-        result bool
-    }{
-        {"192.168.1.1", true},
-        {"192.168.1", false},
-        {"192.168.1.1.1", false},
-        {"-192.168.1.1", false},
-        {"260.192.1.1", false},
-    }
-
-    for i, testCase := range testCases {
-        result := isValidIP(testCase.ip)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
-        }
-    }
-
-}
-
-// Tests validate virtual host validator.
-func TestIsVirtualHostSupported(t *testing.T) {
-    testCases := []struct {
-        url    string
-        bucket string
-        // Expeceted result.
-        result bool
-    }{
-        {"https://s3.amazonaws.com", "my-bucket", true},
-        {"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
-        {"https://s3.amazonaws.com", "my-bucket.", false},
-        {"https://amazons3.amazonaws.com", "my-bucket.", false},
-        {"https://storage.googleapis.com/", "my-bucket", true},
-        {"https://mystorage.googleapis.com/", "my-bucket", false},
-    }
-
-    for i, testCase := range testCases {
-        result := isVirtualHostSupported(testCase.url, testCase.bucket)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
-        }
-    }
-}
-
-// Tests validate Amazon endpoint validator.
-func TestIsAmazonEndpoint(t *testing.T) {
-    testCases := []struct {
-        url string
-        // Expected result.
-        result bool
-    }{
-        {"https://192.168.1.1", false},
-        {"192.168.1.1", false},
-        {"http://storage.googleapis.com", false},
-        {"https://storage.googleapis.com", false},
-        {"storage.googleapis.com", false},
-        {"s3.amazonaws.com", false},
-        {"https://amazons3.amazonaws.com", false},
-        {"-192.168.1.1", false},
-        {"260.192.1.1", false},
-        // valid inputs.
-        {"https://s3.amazonaws.com", true},
-        {"https://s3.cn-north-1.amazonaws.com.cn", true},
-    }
-
-    for i, testCase := range testCases {
-        result := isAmazonEndpoint(testCase.url)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
-        }
-    }
-
-}
-
-// Tests validate Amazon S3 China endpoint validator.
-func TestIsAmazonChinaEndpoint(t *testing.T) {
-    testCases := []struct {
-        url string
-        // Expected result.
-        result bool
-    }{
-        {"https://192.168.1.1", false},
-        {"192.168.1.1", false},
-        {"http://storage.googleapis.com", false},
-        {"https://storage.googleapis.com", false},
-        {"storage.googleapis.com", false},
-        {"s3.amazonaws.com", false},
-        {"https://amazons3.amazonaws.com", false},
-        {"-192.168.1.1", false},
-        {"260.192.1.1", false},
-        // s3.amazonaws.com is not a valid Amazon S3 China end point.
-        {"https://s3.amazonaws.com", false},
-        // valid input.
-        {"https://s3.cn-north-1.amazonaws.com.cn", true},
-    }
-
-    for i, testCase := range testCases {
-        result := isAmazonChinaEndpoint(testCase.url)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
-        }
-    }
-
-}
-
-// Tests validate Google Cloud end point validator.
-func TestIsGoogleEndpoint(t *testing.T) {
-    testCases := []struct {
-        url string
-        // Expected result.
-        result bool
-    }{
-        {"192.168.1.1", false},
-        {"https://192.168.1.1", false},
-        {"s3.amazonaws.com", false},
-        {"http://s3.amazonaws.com", false},
-        {"https://s3.amazonaws.com", false},
-        {"https://s3.cn-north-1.amazonaws.com.cn", false},
-        {"-192.168.1.1", false},
-        {"260.192.1.1", false},
-        // valid inputs.
-        {"http://storage.googleapis.com", true},
-        {"https://storage.googleapis.com", true},
-    }
-
-    for i, testCase := range testCases {
-        result := isGoogleEndpoint(testCase.url)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
-        }
-    }
-
-}
-
 // Tests validate the expiry time validator.
 func TestIsValidExpiry(t *testing.T) {
     testCases := []struct {
@@ -355,56 +214,3 @@ func TestIsValidBucketName(t *testing.T) {
     }
 
 }
-
-// Tests validate the query encoder.
-func TestQueryEncode(t *testing.T) {
-    testCases := []struct {
-        queryKey      string
-        valueToEncode []string
-        // Expected result.
-        result string
-    }{
-        {"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
-        {"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
-        {"prefix", []string{"test#123"}, "prefix=test%23123"},
-        {"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
-        {"prefix", []string{"test123"}, "prefix=test123"},
-        {"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
-    }
-
-    for i, testCase := range testCases {
-        urlValues := make(url.Values)
-        for _, valueToEncode := range testCase.valueToEncode {
-            urlValues.Add(testCase.queryKey, valueToEncode)
-        }
-        result := queryEncode(urlValues)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
-        }
-    }
-}
-
-// Tests validate the URL path encoder.
-func TestUrlEncodePath(t *testing.T) {
-    testCases := []struct {
-        // Input.
-        inputStr string
-        // Expected result.
-        result string
-    }{
-        {"thisisthe%url", "thisisthe%25url"},
-        {"本語", "%E6%9C%AC%E8%AA%9E"},
-        {"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
-        {">123", "%3E123"},
-        {"myurl#link", "myurl%23link"},
-        {"space in url", "space%20in%20url"},
-        {"url+path", "url%2Bpath"},
-    }
-
-    for i, testCase := range testCases {
-        result := urlEncodePath(testCase.inputStr)
-        if testCase.result != result {
-            t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
-        }
-    }
-}