Update s3 library (again)

Alexander Neumann 2016-01-07 20:23:38 +01:00
parent 181480b68b
commit 0e9236475b
41 changed files with 2793 additions and 1753 deletions

Godeps/Godeps.json (generated): 4 changed lines

@@ -24,8 +24,8 @@
 		},
 		{
 			"ImportPath": "github.com/minio/minio-go",
-			"Comment": "v0.2.5-201-g410319e",
-			"Rev": "410319e0e39a372998f4d9cd2b9da4ff243ae388"
+			"Comment": "v0.2.5-205-g38be406",
+			"Rev": "38be40605dc37d2d7ec06169218365b46ae33e4b"
 		},
 		{
 			"ImportPath": "github.com/pkg/sftp",

.travis.yml

@@ -15,7 +15,7 @@ go:
 script:
 - go vet ./...
-- go test -test.short -race -v ./...
+- go test -short -race -v ./...
 notifications:
   slack:
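The script change above swaps `-test.short` for the conventional `-short` spelling; both set the same flag, which test code consults through `testing.Short()`. A minimal sketch of the consuming side (the test name is hypothetical, not from this commit):

    package example

    import "testing"

    // TestNetworkRoundTrip is a hypothetical test; it shows how the
    // -short flag set by "go test -short ./..." is read via
    // testing.Short().
    func TestNetworkRoundTrip(t *testing.T) {
        if testing.Short() {
            // Skip anything slow or network-bound in short mode.
            t.Skip("skipping network round trip in -short mode")
        }
        // ... full, slower test body would run here ...
    }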

README.md

@@ -67,14 +67,14 @@ func main() {
 * [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
 * [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go)
 * [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go)
-* [ListBuckets() []BucketStat](examples/s3/listbuckets.go)
-* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectStat](examples/s3/listobjects.go)
-* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartStat](examples/s3/listincompleteuploads.go)
+* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
+* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
+* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)

 ### Object Operations.
 * [PutObject(bucketName, objectName, io.Reader, size, contentType) error](examples/s3/putobject.go)
-* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectStat, error)](examples/s3/getobject.go)
-* [StatObject(bucketName, objectName) (ObjectStat, error)](examples/s3/statobject.go)
+* [GetObject(bucketName, objectName) (io.ReadCloser, ObjectInfo, error)](examples/s3/getobject.go)
+* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go)
 * [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
 * [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
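The README renames above (BucketStat/ObjectStat/ObjectMultipartStat to BucketInfo/ObjectInfo/ObjectMultipartInfo) ripple through every listing call. A minimal sketch of the renamed API in use, assuming a minio.Client value obtained from the usual client.New(....) call shown in the package docs:

    package example

    import (
        "fmt"

        minio "github.com/minio/minio-go"
    )

    // listAll walks every object below prefix using the renamed
    // ObjectInfo type. Constructing the Client is elided here (the
    // New(...) arguments are version dependent).
    func listAll(c minio.Client, bucket, prefix string) error {
        // Done channel lets us stop the internal listing goroutine early.
        doneCh := make(chan struct{})
        defer close(doneCh)

        recursive := true
        for info := range c.ListObjects(bucket, prefix, recursive, doneCh) {
            // Each ObjectInfo carries its own error field.
            if info.Err != nil {
                return info.Err
            }
            fmt.Printf("%s\t%d bytes\t%s\n", info.Key, info.Size, info.LastModified)
        }
        return nil
    }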

@@ -16,26 +16,27 @@
 package minio

-import (
-	"io"
-	"time"
-)
+import "time"

-// BucketStat container for bucket metadata.
-type BucketStat struct {
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
 	// The name of the bucket.
 	Name string
 	// Date the bucket was created.
 	CreationDate time.Time
 }

-// ObjectStat container for object metadata.
-type ObjectStat struct {
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+	// An ETag is optionally set to md5sum of an object. In case of multipart objects,
+	// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
+	// each parts concatenated into one string.
 	ETag string
-	Key          string
-	LastModified time.Time
-	Size         int64
-	ContentType  string
+
+	Key          string    // Name of the object
+	LastModified time.Time // Date and time the object was last modified.
+	Size         int64     // Size in bytes of the object.
+	ContentType  string    // A standard MIME type describing the format of the object data.

 	// Owner name.
 	Owner struct {
@@ -50,18 +51,21 @@ type ObjectStat struct {
 	// Error
 	Err error
 }

-// ObjectMultipartStat container for multipart object metadata.
-type ObjectMultipartStat struct {
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
 	// Date and time at which the multipart upload was initiated.
 	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`

 	Initiator initiator
 	Owner     owner

+	// The type of storage to use for the object. Defaults to 'STANDARD'.
 	StorageClass string

 	// Key of the object for which the multipart upload was initiated.
 	Key string

+	// Size in bytes of the object.
 	Size int64

 	// Upload ID that identifies the multipart upload.
@@ -70,24 +74,3 @@ type ObjectMultipartStat struct {
 	// Error
 	Err error
 }
-
-// partData - container for each part.
-type partData struct {
-	MD5Sum     []byte
-	Sha256Sum  []byte
-	ReadCloser io.ReadCloser
-	Size       int64
-	Number     int // partData number.
-
-	// Error
-	Err error
-}
-
-// putObjectData - container for each single PUT operation.
-type putObjectData struct {
-	MD5Sum      []byte
-	Sha256Sum   []byte
-	ReadCloser  io.ReadCloser
-	Size        int64
-	ContentType string
-}
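The new ObjectInfo comment documents the multipart ETag shape: MD5SUM-N, where MD5SUM is the md5 of the concatenated per-part md5s and N is the part count. A tiny self-contained sketch of telling the two ETag forms apart; the helper name is ours, not the library's:

    package main

    import (
        "fmt"
        "strings"
    )

    // isMultipartETag reports whether an S3 ETag has the MD5SUM-N
    // shape described above. A plain single-PUT ETag is a bare hex
    // md5 with no "-N" suffix.
    func isMultipartETag(etag string) bool {
        etag = strings.Trim(etag, `"`)
        i := strings.LastIndex(etag, "-")
        return i > 0 && i < len(etag)-1
    }

    func main() {
        fmt.Println(isMultipartETag("9b2cf535f27731c974343645a3985328"))   // false
        fmt.Println(isMultipartETag("3858f62230ac3c915f300c664312c11f-9")) // true
    }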

@@ -17,7 +17,6 @@
 package minio

 import (
-	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"net/http"
@@ -36,7 +35,7 @@ import (
 </Error>
 */

-// ErrorResponse is the type error returned by some API operations.
+// ErrorResponse - Is the typed error returned by all API operations.
 type ErrorResponse struct {
 	XMLName xml.Name `xml:"Error" json:"-"`
 	Code    string
@@ -46,12 +45,13 @@ type ErrorResponse struct {
 	RequestID string `xml:"RequestId"`
 	HostID    string `xml:"HostId"`

-	// This is a new undocumented field, set only if available.
+	// Region where the bucket is located. This header is returned
+	// only in HEAD bucket and ListObjects response.
 	AmzBucketRegion string
 }

-// ToErrorResponse returns parsed ErrorResponse struct, if input is nil or not ErrorResponse return value is nil
-// this fuction is useful when some one wants to dig deeper into the error structures over the network.
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
 //
 // For example:
 //
@@ -61,7 +61,6 @@ type ErrorResponse struct {
 //   reader, stat, err := s3.GetObject(...)
 //   if err != nil {
 //      resp := s3.ToErrorResponse(err)
-//      fmt.Println(resp.ToXML())
 //   }
 //   ...
 func ToErrorResponse(err error) ErrorResponse {
@@ -73,47 +72,32 @@ func ToErrorResponse(err error) ErrorResponse {
 	}
 }

-// ToXML send raw xml marshalled as string
-func (e ErrorResponse) ToXML() string {
-	b, err := xml.Marshal(&e)
-	if err != nil {
-		panic(err)
-	}
-	return string(b)
-}
-
-// ToJSON send raw json marshalled as string
-func (e ErrorResponse) ToJSON() string {
-	b, err := json.Marshal(&e)
-	if err != nil {
-		panic(err)
-	}
-	return string(b)
-}
-
-// Error formats HTTP error string
+// Error - Returns HTTP error string
 func (e ErrorResponse) Error() string {
 	return e.Message
 }

-// Common reporting string
+// Common string for errors to report issue location in unexpected
+// cases.
 const (
 	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
 )

-// HTTPRespToErrorResponse returns a new encoded ErrorResponse structure
+// HTTPRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
 func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
 	if resp == nil {
 		msg := "Response is empty. " + reportIssue
 		return ErrInvalidArgument(msg)
 	}
-	var errorResponse ErrorResponse
-	err := xmlDecoder(resp.Body, &errorResponse)
+	var errResp ErrorResponse
+	err := xmlDecoder(resp.Body, &errResp)
+	// Xml decoding failed with no body, fall back to HTTP headers.
 	if err != nil {
 		switch resp.StatusCode {
 		case http.StatusNotFound:
 			if objectName == "" {
-				errorResponse = ErrorResponse{
+				errResp = ErrorResponse{
 					Code:       "NoSuchBucket",
 					Message:    "The specified bucket does not exist.",
 					BucketName: bucketName,
@@ -122,7 +106,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 					AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 				}
 			} else {
-				errorResponse = ErrorResponse{
+				errResp = ErrorResponse{
 					Code:       "NoSuchKey",
 					Message:    "The specified key does not exist.",
 					BucketName: bucketName,
@@ -133,7 +117,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 				}
 			}
 		case http.StatusForbidden:
-			errorResponse = ErrorResponse{
+			errResp = ErrorResponse{
 				Code:       "AccessDenied",
 				Message:    "Access Denied.",
 				BucketName: bucketName,
@@ -143,7 +127,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 			}
 		case http.StatusConflict:
-			errorResponse = ErrorResponse{
+			errResp = ErrorResponse{
 				Code:       "Conflict",
 				Message:    "Bucket not empty.",
 				BucketName: bucketName,
@@ -152,7 +136,7 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 				AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
 			}
 		default:
-			errorResponse = ErrorResponse{
+			errResp = ErrorResponse{
 				Code:       resp.Status,
 				Message:    resp.Status,
 				BucketName: bucketName,
@@ -162,10 +146,21 @@ func HTTPRespToErrorResponse(resp *http.Response, bucketName, objectName string)
 			}
 		}
 	}
-	return errorResponse
+
+	// AccessDenied without a signature mismatch code, usually means
+	// that the bucket policy has certain restrictions where some API
+	// operations are not allowed. Handle this case so that top level
+	// callers can interpret this easily and fall back if needed to a
+	// lower functionality call. Read each individual API specific
+	// code for such fallbacks.
+	if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" {
+		errResp.Code = "NotImplemented"
+		errResp.Message = "Operation is not allowed according to your bucket policy."
+	}
+	return errResp
 }

-// ErrEntityTooLarge input size is larger than supported maximum.
+// ErrEntityTooLarge - Input size is larger than supported maximum.
 func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error {
 	msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size '5GiB' for single PUT operation.", totalSize)
 	return ErrorResponse{
@@ -176,7 +171,19 @@ func ErrEntityTooLarge(totalSize int64, bucketName, objectName string) error {
 	}
 }

-// ErrUnexpectedShortRead unexpected shorter read of input buffer from target.
+// ErrEntityTooSmall - Input size is smaller than supported minimum.
+func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
+	msg := fmt.Sprintf("Your proposed upload size %d is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+	return ErrorResponse{
+		Code:       "EntityTooLarge",
+		Message:    msg,
+		BucketName: bucketName,
+		Key:        objectName,
+	}
+}
+
+// ErrUnexpectedShortRead - Unexpected shorter read of input buffer from
+// target.
 func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName string) error {
 	msg := fmt.Sprintf("Data read %s is shorter than the size %s of input buffer.",
 		strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
@@ -188,7 +195,7 @@ func ErrUnexpectedShortRead(totalRead, totalSize int64, bucketName, objectName s
 	}
 }

-// ErrUnexpectedEOF unexpected end of file reached.
+// ErrUnexpectedEOF - Unexpected end of file reached.
 func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
 	msg := fmt.Sprintf("Data read %s is not equal to the size %s of the input Reader.",
 		strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
@@ -200,7 +207,7 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
 	}
 }

-// ErrInvalidBucketName - invalid bucket name response.
+// ErrInvalidBucketName - Invalid bucket name response.
 func ErrInvalidBucketName(message string) error {
 	return ErrorResponse{
 		Code:    "InvalidBucketName",
@@ -209,7 +216,7 @@ func ErrInvalidBucketName(message string) error {
 	}
 }

-// ErrInvalidObjectName - invalid object name response.
+// ErrInvalidObjectName - Invalid object name response.
 func ErrInvalidObjectName(message string) error {
 	return ErrorResponse{
 		Code:    "NoSuchKey",
@@ -218,7 +225,7 @@ func ErrInvalidObjectName(message string) error {
 	}
 }

-// ErrInvalidParts - invalid number of parts.
+// ErrInvalidParts - Invalid number of parts.
 func ErrInvalidParts(expectedParts, uploadedParts int) error {
 	msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
 	return ErrorResponse{
@@ -228,11 +235,11 @@ func ErrInvalidParts(expectedParts, uploadedParts int) error {
 	}
 }

-// ErrInvalidObjectPrefix - invalid object prefix response is
+// ErrInvalidObjectPrefix - Invalid object prefix response is
 // similar to object name response.
 var ErrInvalidObjectPrefix = ErrInvalidObjectName

-// ErrInvalidArgument - invalid argument response.
+// ErrInvalidArgument - Invalid argument response.
 func ErrInvalidArgument(message string) error {
 	return ErrorResponse{
 		Code:    "InvalidArgument",
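Two behavior changes above are worth calling out: ErrorResponse loses its ToXML/ToJSON helpers, and HTTPRespToErrorResponse now rewrites a bare AccessDenied into NotImplemented when a bucket policy blocks an operation. A hedged sketch of how a caller might branch on the typed error after this change (inspectErr and the surrounding wiring are illustrative):

    package example

    import (
        "fmt"

        minio "github.com/minio/minio-go"
    )

    // inspectErr digs into the typed error of any minio-go call,
    // including the new "NotImplemented" code substituted for a bare
    // AccessDenied.
    func inspectErr(err error) {
        if err == nil {
            return
        }
        resp := minio.ToErrorResponse(err)
        switch resp.Code {
        case "NoSuchBucket", "NoSuchKey":
            fmt.Println("missing:", resp.Message)
        case "NotImplemented":
            // Bucket policy forbids this operation; fall back to a
            // lower-functionality call, as the code comment suggests.
            fmt.Println("restricted by bucket policy:", resp.Message)
        default:
            fmt.Println(resp.Code, resp.Message, "region:", resp.AmzBucketRegion)
        }
    }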

@@ -22,7 +22,7 @@ import (
 	"path/filepath"
 )

-// FGetObject - get object to a file.
+// FGetObject - download contents of an object to a local file.
 func (c Client) FGetObject(bucketName, objectName, filePath string) error {
 	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
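Only the doc comment changes here; the call itself is unchanged. For orientation, a minimal sketch of FGetObject in use (client value, bucket, key and path are placeholders):

    package example

    import (
        "log"

        minio "github.com/minio/minio-go"
    )

    // download fetches one object straight to disk.
    func download(c minio.Client) {
        if err := c.FGetObject("mybucket", "photos/cat.jpg", "/tmp/cat.jpg"); err != nil {
            log.Fatalln(err)
        }
    }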

@@ -28,15 +28,18 @@ import (
 	"time"
 )

-// GetBucketACL get the permissions on an existing bucket.
+// GetBucketACL - Get the permissions on an existing bucket.
 //
 // Returned values are:
 //
-//  private - owner gets full access.
-//  public-read - owner gets full access, others get read access.
-//  public-read-write - owner gets full access, others get full access too.
-//  authenticated-read - owner gets full access, authenticated users get read access.
+//  private - Owner gets full access.
+//  public-read - Owner gets full access, others get read access.
+//  public-read-write - Owner gets full access, others get full access
+//  too.
+//  authenticated-read - Owner gets full access, authenticated users
+//  get read access.
 func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
+	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
 		return "", err
 	}
@@ -73,9 +76,10 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 		return "", err
 	}

-	// We need to avoid following de-serialization check for Google Cloud Storage.
-	// On Google Cloud Storage "private" canned ACL's policy do not have grant list.
-	// Treat it as a valid case, check for all other vendors.
+	// We need to avoid following de-serialization check for Google
+	// Cloud Storage. On Google Cloud Storage "private" canned ACL's
+	// policy do not have grant list. Treat it as a valid case, check
+	// for all other vendors.
 	if !isGoogleEndpoint(c.endpointURL) {
 		if policy.AccessControlList.Grant == nil {
 			errorResponse := ErrorResponse{
@@ -90,8 +94,8 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 		}
 	}

-	// boolean cues to indentify right canned acls.
-	var publicRead, publicWrite bool
+	// Boolean cues to indentify right canned acls.
+	var publicRead, publicWrite, authenticatedRead bool

 	// Handle grants.
 	grants := policy.AccessControlList.Grant
@@ -100,7 +104,8 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 			continue
 		}
 		if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
-			return BucketACL("authenticated-read"), nil
+			authenticatedRead = true
+			break
 		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
 			publicWrite = true
 		} else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
@@ -108,15 +113,19 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 		}
 	}

-	// public write and not enabled. return.
+	// Verify if acl is authenticated read.
+	if authenticatedRead {
+		return BucketACL("authenticated-read"), nil
+	}
+	// Verify if acl is private.
 	if !publicWrite && !publicRead {
 		return BucketACL("private"), nil
 	}
-	// public write not enabled but public read is. return.
+	// Verify if acl is public-read.
 	if !publicWrite && publicRead {
 		return BucketACL("public-read"), nil
 	}
-	// public read and public write are enabled return.
+	// Verify if acl is public-read-write.
 	if publicRead && publicWrite {
 		return BucketACL("public-read-write"), nil
 	}
@@ -129,47 +138,30 @@ func (c Client) GetBucketACL(bucketName string) (BucketACL, error) {
 	}
 }

-// GetObject gets object content from specified bucket.
-// You may also look at GetPartialObject.
-func (c Client) GetObject(bucketName, objectName string) (io.ReadCloser, ObjectStat, error) {
+// GetObject - returns an seekable, readable object.
+func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
+	// Input validation.
 	if err := isValidBucketName(bucketName); err != nil {
-		return nil, ObjectStat{}, err
+		return nil, err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return nil, ObjectStat{}, err
-	}
-	// get the whole object as a stream, no seek or resume supported for this.
-	return c.getObject(bucketName, objectName, 0, 0)
-}
-
-// ReadAtCloser readat closer interface.
-type ReadAtCloser interface {
-	io.ReaderAt
-	io.Closer
-}
-
-// GetObjectPartial returns a io.ReadAt for reading sparse entries.
-func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, ObjectStat, error) {
-	if err := isValidBucketName(bucketName); err != nil {
-		return nil, ObjectStat{}, err
-	}
-	if err := isValidObjectName(objectName); err != nil {
-		return nil, ObjectStat{}, err
+		return nil, err
 	}
-	// Send an explicit stat to get the actual object size.
-	objectStat, err := c.StatObject(bucketName, objectName)
+	// Send an explicit info to get the actual object size.
+	objectInfo, err := c.StatObject(bucketName, objectName)
 	if err != nil {
-		return nil, ObjectStat{}, err
+		return nil, err
 	}

 	// Create request channel.
-	reqCh := make(chan readAtRequest)
+	reqCh := make(chan readRequest)
 	// Create response channel.
-	resCh := make(chan readAtResponse)
+	resCh := make(chan readResponse)
 	// Create done channel.
 	doneCh := make(chan struct{})

-	// This routine feeds partial object data as and when the caller reads.
+	// This routine feeds partial object data as and when the caller
+	// reads.
 	go func() {
 		defer close(reqCh)
 		defer close(resCh)
@@ -185,21 +177,21 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O
 			// Get shortest length.
 			// NOTE: Last remaining bytes are usually smaller than
 			// req.Buffer size. Use that as the final length.
-			length := math.Min(float64(len(req.Buffer)), float64(objectStat.Size-req.Offset))
+			length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset))
 			httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length))
 			if err != nil {
-				resCh <- readAtResponse{
+				resCh <- readResponse{
 					Error: err,
 				}
 				return
 			}
 			size, err := io.ReadFull(httpReader, req.Buffer)
 			if err == io.ErrUnexpectedEOF {
-				// If an EOF happens after reading some but not all the bytes
-				// ReadFull returns ErrUnexpectedEOF
+				// If an EOF happens after reading some but not
+				// all the bytes ReadFull returns ErrUnexpectedEOF
 				err = io.EOF
 			}
-			resCh <- readAtResponse{
+			resCh <- readResponse{
 				Size:  int(size),
 				Error: err,
 			}
@@ -207,78 +199,148 @@ func (c Client) GetObjectPartial(bucketName, objectName string) (ReadAtCloser, O
 		}
 	}()

 	// Return the readerAt backed by routine.
-	return newObjectReadAtCloser(reqCh, resCh, doneCh, objectStat.Size), objectStat, nil
+	return newObject(reqCh, resCh, doneCh, objectInfo), nil
 }

-// response message container to reply back for the request.
-type readAtResponse struct {
+// Read response message container to reply back for the request.
+type readResponse struct {
 	Size  int
 	Error error
 }

-// request message container to communicate with internal go-routine.
-type readAtRequest struct {
+// Read request message container to communicate with internal
+// go-routine.
+type readRequest struct {
 	Buffer []byte
 	Offset int64 // readAt offset.
 }

-// objectReadAtCloser container for io.ReadAtCloser.
-type objectReadAtCloser struct {
-	// mutex.
+// Object represents an open object. It implements Read, ReadAt,
+// Seeker, Close for a HTTP stream.
+type Object struct {
+	// Mutex.
 	mutex *sync.Mutex

 	// User allocated and defined.
-	reqCh      chan<- readAtRequest
-	resCh      <-chan readAtResponse
-	doneCh     chan<- struct{}
-	objectSize int64
+	reqCh      chan<- readRequest
+	resCh      <-chan readResponse
+	doneCh     chan<- struct{}
+	currOffset int64
+	objectInfo ObjectInfo

 	// Previous error saved for future calls.
 	prevErr error
 }

-// newObjectReadAtCloser implements a io.ReadSeeker for a HTTP stream.
-func newObjectReadAtCloser(reqCh chan<- readAtRequest, resCh <-chan readAtResponse, doneCh chan<- struct{}, objectSize int64) *objectReadAtCloser {
-	return &objectReadAtCloser{
-		mutex:      new(sync.Mutex),
-		reqCh:      reqCh,
-		resCh:      resCh,
-		doneCh:     doneCh,
-		objectSize: objectSize,
-	}
-}
+// Read reads up to len(p) bytes into p. It returns the number of
+// bytes read (0 <= n <= len(p)) and any error encountered. Returns
+// io.EOF upon end of file.
+func (o *Object) Read(b []byte) (n int, err error) {
+	if o == nil {
+		return 0, ErrInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// If current offset has reached Size limit, return EOF.
+	if o.currOffset >= o.objectInfo.Size {
+		return 0, io.EOF
+	}
+
+	// Previous prevErr is which was saved in previous operation.
+	if o.prevErr != nil {
+		return 0, o.prevErr
+	}
+
+	// Send current information over control channel to indicate we
+	// are ready.
+	reqMsg := readRequest{}
+
+	// Send the offset and pointer to the buffer over the channel.
+	reqMsg.Buffer = b
+	reqMsg.Offset = o.currOffset
+
+	// Send read request over the control channel.
+	o.reqCh <- reqMsg
+
+	// Get data over the response channel.
+	dataMsg := <-o.resCh
+
+	// Bytes read.
+	bytesRead := int64(dataMsg.Size)
+
+	// Update current offset.
+	o.currOffset += bytesRead
+
+	if dataMsg.Error == nil {
+		// If currOffset read is equal to objectSize
+		// We have reached end of file, we return io.EOF.
+		if o.currOffset >= o.objectInfo.Size {
+			return dataMsg.Size, io.EOF
+		}
+		return dataMsg.Size, nil
+	}
+
+	// Save any error.
+	o.prevErr = dataMsg.Error
+	return dataMsg.Size, dataMsg.Error
+}
+
+// Stat returns the ObjectInfo structure describing object.
+func (o *Object) Stat() (ObjectInfo, error) {
+	if o == nil {
+		return ObjectInfo{}, ErrInvalidArgument("Object is nil")
+	}
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.prevErr != nil {
+		return ObjectInfo{}, o.prevErr
+	}
+
+	return o.objectInfo, nil
+}

-// ReadAt reads len(b) bytes from the File starting at byte offset off.
-// It returns the number of bytes read and the error, if any.
-// ReadAt always returns a non-nil error when n < len(b).
-// At end of file, that error is io.EOF.
-func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) {
+// ReadAt reads len(b) bytes from the File starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+	if o == nil {
+		return 0, ErrInvalidArgument("Object is nil")
+	}
+
 	// Locking.
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
+	o.mutex.Lock()
+	defer o.mutex.Unlock()

-	// if offset is negative and offset is greater than or equal to object size we return EOF.
-	if offset < 0 || offset >= r.objectSize {
+	// If offset is negative and offset is greater than or equal to
+	// object size we return EOF.
+	if offset < 0 || offset >= o.objectInfo.Size {
 		return 0, io.EOF
 	}

 	// prevErr is which was saved in previous operation.
-	if r.prevErr != nil {
-		return 0, r.prevErr
+	if o.prevErr != nil {
+		return 0, o.prevErr
 	}

-	// Send current information over control channel to indicate we are ready.
-	reqMsg := readAtRequest{}
+	// Send current information over control channel to indicate we
+	// are ready.
+	reqMsg := readRequest{}

-	// Send the current offset and bytes requested.
+	// Send the offset and pointer to the buffer over the channel.
 	reqMsg.Buffer = b
 	reqMsg.Offset = offset

 	// Send read request over the control channel.
-	r.reqCh <- reqMsg
+	o.reqCh <- reqMsg

 	// Get data over the response channel.
-	dataMsg := <-r.resCh
+	dataMsg := <-o.resCh

 	// Bytes read.
 	bytesRead := int64(dataMsg.Size)
@@ -286,38 +348,109 @@ func (r *objectReadAtCloser) ReadAt(b []byte, offset int64) (int, error) {
 	if dataMsg.Error == nil {
 		// If offset+bytes read is equal to objectSize
 		// we have reached end of file, we return io.EOF.
-		if offset+bytesRead == r.objectSize {
+		if offset+bytesRead == o.objectInfo.Size {
 			return dataMsg.Size, io.EOF
 		}
 		return dataMsg.Size, nil
 	}

 	// Save any error.
-	r.prevErr = dataMsg.Error
+	o.prevErr = dataMsg.Error
 	return dataMsg.Size, dataMsg.Error
 }

-// Closer is the interface that wraps the basic Close method.
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
 //
-// The behavior of Close after the first call returns error for
-// subsequent Close() calls.
-func (r *objectReadAtCloser) Close() (err error) {
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal, subsequent io operations succeed until the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+	if o == nil {
+		return 0, ErrInvalidArgument("Object is nil")
+	}
+
 	// Locking.
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.prevErr != nil {
+		// At EOF seeking is legal, for any other errors we return.
+		if o.prevErr != io.EOF {
+			return 0, o.prevErr
+		}
+	}
+
+	// Negative offset is valid for whence of '2'.
+	if offset < 0 && whence != 2 {
+		return 0, ErrInvalidArgument(fmt.Sprintf("Object: negative position not allowed for %d.", whence))
+	}
+	switch whence {
+	default:
+		return 0, ErrInvalidArgument(fmt.Sprintf("Object: invalid whence %d", whence))
+	case 0:
+		if offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		o.currOffset = offset
+	case 1:
+		if o.currOffset+offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		o.currOffset += offset
+	case 2:
+		// Seeking to positive offset is valid for whence '2', but
+		// since we are backing a Reader we have reached 'EOF' if
+		// offset is positive.
+		if offset > 0 {
+			return 0, io.EOF
+		}
+		// Seeking to negative position not allowed for whence.
+		if o.objectInfo.Size+offset < 0 {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Object: Seeking at negative offset not allowed for %d", whence))
+		}
+		o.currOffset += offset
+	}
+	// Return the effective offset.
+	return o.currOffset, nil
+}
+
+// Close - The behavior of Close after the first call returns error
+// for subsequent Close() calls.
+func (o *Object) Close() (err error) {
+	if o == nil {
+		return ErrInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()

 	// prevErr is which was saved in previous operation.
-	if r.prevErr != nil {
-		return r.prevErr
+	if o.prevErr != nil {
+		return o.prevErr
 	}

 	// Close successfully.
-	close(r.doneCh)
+	close(o.doneCh)

 	// Save this for any subsequent frivolous reads.
-	errMsg := "objectReadAtCloser: is already closed. Bad file descriptor."
-	r.prevErr = errors.New(errMsg)
-	return
+	errMsg := "Object: Is already closed. Bad file descriptor."
+	o.prevErr = errors.New(errMsg)
+	return nil
+}
+
+// newObject instantiates a new *minio.Object*
+func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object {
+	return &Object{
+		mutex:      &sync.Mutex{},
+		reqCh:      reqCh,
+		resCh:      resCh,
+		doneCh:     doneCh,
+		objectInfo: objectInfo,
+	}
 }

 // getObject - retrieve object from Object Storage.
@@ -327,13 +460,13 @@ func (r *objectReadAtCloser) Close() (err error) {
 //
 // For more information about the HTTP Range header.
 // go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectStat, error) {
+func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
 	// Validate input arguments.
 	if err := isValidBucketName(bucketName); err != nil {
-		return nil, ObjectStat{}, err
+		return nil, ObjectInfo{}, err
 	}
 	if err := isValidObjectName(objectName); err != nil {
-		return nil, ObjectStat{}, err
+		return nil, ObjectInfo{}, err
 	}

 	customHeader := make(http.Header)
@@ -353,16 +486,16 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 		customHeader: customHeader,
 	})
 	if err != nil {
-		return nil, ObjectStat{}, err
+		return nil, ObjectInfo{}, err
 	}

 	// Execute the request.
 	resp, err := c.do(req)
 	if err != nil {
-		return nil, ObjectStat{}, err
+		return nil, ObjectInfo{}, err
 	}
 	if resp != nil {
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
-			return nil, ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
+			return nil, ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
 		}
 	}

@@ -374,7 +507,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
 	if err != nil {
 		msg := "Last-Modified time format not recognized. " + reportIssue
-		return nil, ObjectStat{}, ErrorResponse{
+		return nil, ObjectInfo{}, ErrorResponse{
 			Code:      "InternalError",
 			Message:   msg,
 			RequestID: resp.Header.Get("x-amz-request-id"),
@@ -387,7 +520,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
 	if contentType == "" {
 		contentType = "application/octet-stream"
 	}
-	var objectStat ObjectStat
+	var objectStat ObjectInfo
 	objectStat.ETag = md5sum
 	objectStat.Key = objectName
 	objectStat.Size = resp.ContentLength
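The big change in this file: GetObject now returns a *Object that implements Read, ReadAt, Seek and Close over a single background goroutine, replacing the old io.ReadCloser plus GetObjectPartial pair. A sketch of the new call pattern (names and sizes are placeholders; note this commit's Seek only accepts offsets within the object):

    package example

    import (
        "fmt"
        "io/ioutil"
        "log"

        minio "github.com/minio/minio-go"
    )

    // printBody shows the new *Object stream: the caller can Stat,
    // Seek and Read one handle.
    func printBody(c minio.Client) {
        obj, err := c.GetObject("mybucket", "big.log")
        if err != nil {
            log.Fatalln(err)
        }
        defer obj.Close() // a second Close reports an error, per the code above

        st, err := obj.Stat()
        if err != nil {
            log.Fatalln(err)
        }

        // Skip a fixed 512 byte header; with whence 0 the commit's
        // Seek accepts any offset up to the object size.
        if _, err := obj.Seek(512, 0); err != nil {
            log.Fatalln(err)
        }

        rest, err := ioutil.ReadAll(obj) // Read ends with io.EOF, which ReadAll swallows
        if err != nil {
            log.Fatalln(err)
        }
        fmt.Printf("%s: read %d of %d bytes\n", st.Key, len(rest), st.Size)
    }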

@@ -33,7 +33,7 @@ import (
 //	fmt.Println(message)
 //	}
 //
-func (c Client) ListBuckets() ([]BucketStat, error) {
+func (c Client) ListBuckets() ([]BucketInfo, error) {
 	// Instantiate a new request.
 	req, err := c.newRequest("GET", requestMetadata{})
 	if err != nil {
@@ -64,19 +64,25 @@ func (c Client) ListBuckets() ([]BucketStat, error) {
 // the specified bucket. If recursion is enabled it would list
 // all subdirectories and all its contents.
 //
-// Your input paramters are just bucketName, objectPrefix and recursive. If you
-// enable recursive as 'true' this function will return back all the
-// objects in a given bucket name and object prefix.
+// Your input paramters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
 //
 //	api := client.New(....)
+//	// Create a done channel.
+//	doneCh := make(chan struct{})
+//	defer close(doneCh)
+//	// Recurively list all objects in 'mytestbucket'
 //	recursive := true
-//	for message := range api.ListObjects("mytestbucket", "starthere", recursive) {
+//	for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
 //		fmt.Println(message)
 //	}
 //
-func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat {
+func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
 	// Allocate new list objects channel.
-	objectStatCh := make(chan ObjectStat, 1000)
+	objectStatCh := make(chan ObjectInfo, 1000)
 	// Default listing is delimited at "/"
 	delimiter := "/"
 	if recursive {
@@ -86,7 +92,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 	// Validate bucket name.
 	if err := isValidBucketName(bucketName); err != nil {
 		defer close(objectStatCh)
-		objectStatCh <- ObjectStat{
+		objectStatCh <- ObjectInfo{
 			Err: err,
 		}
 		return objectStatCh
@@ -94,14 +100,14 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 	// Validate incoming object prefix.
 	if err := isValidObjectPrefix(objectPrefix); err != nil {
 		defer close(objectStatCh)
-		objectStatCh <- ObjectStat{
+		objectStatCh <- ObjectInfo{
 			Err: err,
 		}
 		return objectStatCh
 	}

 	// Initiate list objects goroutine here.
-	go func(objectStatCh chan<- ObjectStat) {
+	go func(objectStatCh chan<- ObjectInfo) {
 		defer close(objectStatCh)
 		// Save marker for next request.
 		var marker string
@@ -109,7 +115,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 			// Get list of objects a maximum of 1000 per request.
 			result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
 			if err != nil {
-				objectStatCh <- ObjectStat{
+				objectStatCh <- ObjectInfo{
 					Err: err,
 				}
 				return
@@ -131,7 +137,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
 			// Send all common prefixes if any.
 			// NOTE: prefixes are only present if the request is delimited.
 			for _, obj := range result.CommonPrefixes {
-				object := ObjectStat{}
+				object := ObjectInfo{}
 				object.Key = obj.Prefix
 				object.Size = 0
 				select {
@@ -181,11 +187,22 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 	// using them in http request.
 	urlValues := make(url.Values)
 	// Set object prefix.
-	urlValues.Set("prefix", urlEncodePath(objectPrefix))
+	if objectPrefix != "" {
+		urlValues.Set("prefix", urlEncodePath(objectPrefix))
+	}
 	// Set object marker.
-	urlValues.Set("marker", urlEncodePath(objectMarker))
+	if objectMarker != "" {
+		urlValues.Set("marker", urlEncodePath(objectMarker))
+	}
 	// Set delimiter.
-	urlValues.Set("delimiter", delimiter)
+	if delimiter != "" {
+		urlValues.Set("delimiter", delimiter)
+	}
+
+	// maxkeys should default to 1000 or less.
+	if maxkeys == 0 || maxkeys > 1000 {
+		maxkeys = 1000
+	}

 	// Set max keys.
 	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
@@ -223,26 +240,31 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
 // objectPrefix from the specified bucket. If recursion is enabled
 // it would list all subdirectories and all its contents.
 //
-// Your input paramters are just bucketName, objectPrefix and recursive.
+// Your input paramters are just bucketName, objectPrefix, recursive
+// and a done channel to proactively close the internal go routine.
 // If you enable recursive as 'true' this function will return back all
 // the multipart objects in a given bucket name.
 //
 //	api := client.New(....)
+//	// Create a done channel.
+//	doneCh := make(chan struct{})
+//	defer close(doneCh)
+//	// Recurively list all objects in 'mytestbucket'
 //	recursive := true
 //	for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) {
 //		fmt.Println(message)
 //	}
 //
-func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat {
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
 	// Turn on size aggregation of individual parts.
 	isAggregateSize := true
 	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
 }

 // listIncompleteUploads lists all incomplete uploads.
-func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat {
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
 	// Allocate channel for multipart uploads.
-	objectMultipartStatCh := make(chan ObjectMultipartStat, 1000)
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000)
 	// Delimiter is set to "/" by default.
 	delimiter := "/"
 	if recursive {
@@ -252,7 +274,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 	// Validate bucket name.
 	if err := isValidBucketName(bucketName); err != nil {
 		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartStat{
+		objectMultipartStatCh <- ObjectMultipartInfo{
 			Err: err,
 		}
 		return objectMultipartStatCh
@@ -260,12 +282,12 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 	// Validate incoming object prefix.
 	if err := isValidObjectPrefix(objectPrefix); err != nil {
 		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartStat{
+		objectMultipartStatCh <- ObjectMultipartInfo{
 			Err: err,
 		}
 		return objectMultipartStatCh
 	}
-	go func(objectMultipartStatCh chan<- ObjectMultipartStat) {
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
 		defer close(objectMultipartStatCh)
 		// object and upload ID marker for future requests.
 		var objectMarker string
@@ -274,7 +296,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 			// list all multipart uploads.
 			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
 			if err != nil {
-				objectMultipartStatCh <- ObjectMultipartStat{
+				objectMultipartStatCh <- ObjectMultipartInfo{
 					Err: err,
 				}
 				return
@@ -289,7 +311,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 					// Get total multipart size.
 					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
 					if err != nil {
-						objectMultipartStatCh <- ObjectMultipartStat{
+						objectMultipartStatCh <- ObjectMultipartInfo{
 							Err: err,
 						}
 					}
@@ -305,7 +327,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
 			// Send all common prefixes if any.
 			// NOTE: prefixes are only present if the request is delimited.
 			for _, obj := range result.CommonPrefixes {
-				object := ObjectMultipartStat{}
+				object := ObjectMultipartInfo{}
 				object.Key = obj.Prefix
 				object.Size = 0
 				select {
@@ -343,13 +365,26 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
 	// Set uploads.
 	urlValues.Set("uploads", "")
 	// Set object key marker.
-	urlValues.Set("key-marker", urlEncodePath(keyMarker))
+	if keyMarker != "" {
+		urlValues.Set("key-marker", urlEncodePath(keyMarker))
+	}
 	// Set upload id marker.
-	urlValues.Set("upload-id-marker", uploadIDMarker)
+	if uploadIDMarker != "" {
+		urlValues.Set("upload-id-marker", uploadIDMarker)
+	}
 	// Set prefix marker.
-	urlValues.Set("prefix", urlEncodePath(prefix))
+	if prefix != "" {
+		urlValues.Set("prefix", urlEncodePath(prefix))
+	}
 	// Set delimiter.
-	urlValues.Set("delimiter", delimiter)
+	if delimiter != "" {
+		urlValues.Set("delimiter", delimiter)
+	}
+
+	// maxUploads should be 1000 or less.
+	if maxUploads == 0 || maxUploads > 1000 {
+		maxUploads = 1000
+	}

 	// Set max-uploads.
 	urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
@@ -445,12 +480,15 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (
 }

 // listObjectPartsQuery (List Parts query)
-//     - lists some or all (up to 1000) parts that have been uploaded for a specific multipart upload
+//     - lists some or all (up to 1000) parts that have been uploaded
+//     for a specific multipart upload
 //
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request paramters :-
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request paramters :-
 // ---------
-// ?part-number-marker - Specifies the part after which listing should begin.
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
 func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
 	// Get resources properly escaped and lined up before using them in http request.
 	urlValues := make(url.Values)
@@ -458,6 +496,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
 	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
 	// Set upload id.
 	urlValues.Set("uploadId", uploadID)
+
+	// maxParts should be 1000 or less.
+	if maxParts == 0 || maxParts > 1000 {
+		maxParts = 1000
+	}
 	// Set max parts.
 	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))

@@ -21,7 +21,7 @@ import (
 	"time"
 )

-// PresignedGetObject returns a presigned URL to access an object without credentials.
+// PresignedGetObject - Returns a presigned URL to access an object without credentials.
 // Expires maximum is 7days - ie. 604800 and minimum is 1.
 func (c Client) PresignedGetObject(bucketName, objectName string, expires time.Duration) (string, error) {
 	// Input validation.
@@ -50,7 +50,7 @@ func (c Client) PresignedGetObject(bucketName, objectName string, expires time.D
 	return req.URL.String(), nil
 }

-// PresignedPutObject returns a presigned URL to upload an object without credentials.
+// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
 // Expires maximum is 7days - ie. 604800 and minimum is 1.
 func (c Client) PresignedPutObject(bucketName, objectName string, expires time.Duration) (string, error) {
 	// Input validation.
@@ -79,7 +79,7 @@ func (c Client) PresignedPutObject(bucketName, objectName string, expires time.D
 	return req.URL.String(), nil
 }

-// PresignedPostPolicy returns POST form data to upload an object at a location.
+// PresignedPostPolicy - Returns POST form data to upload an object at a location.
 func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 	// Validate input arguments.
 	if p.expiration.IsZero() {
@@ -93,7 +93,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 	}
 	bucketName := p.formData["bucket"]
-	// Fetch the location.
+	// Fetch the bucket location.
 	location, err := c.getBucketLocation(bucketName)
 	if err != nil {
 		return nil, err
@@ -101,6 +101,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 	// Keep time.
 	t := time.Now().UTC()
+	// For signature version '2' handle here.
 	if c.signature.isV2() {
 		policyBase64 := p.base64()
 		p.formData["policy"] = policyBase64
@@ -135,7 +136,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) {
 		condition: "$x-amz-credential",
 		value:     credential,
 	})
-	// get base64 encoded policy.
+	// Get base64 encoded policy.
 	policyBase64 := p.base64()
 	// Fill in the form data.
 	p.formData["policy"] = policyBase64

@@ -97,7 +97,7 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
 	}

 	// Set get bucket location always as path style.
-	targetURL := c.endpointURL
+	targetURL := *c.endpointURL
 	if bucketName != "" {
 		// If endpoint supports virtual host style use that always.
 		// Currently only S3 and Google Cloud Storage would support this.
@@ -132,7 +132,7 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str
 	// If location is not 'us-east-1' create bucket location config.
 	if location != "us-east-1" && location != "" {
-		createBucketConfig := new(createBucketConfiguration)
+		createBucketConfig := createBucketConfiguration{}
 		createBucketConfig.Location = location
 		var createBucketConfigBytes []byte
 		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
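The `targetURL := *c.endpointURL` change dereferences the endpoint pointer, presumably so the per-bucket path mutations that follow act on a copy rather than on the client's shared URL. The pattern in miniature (standalone illustration, not minio-go code):

    package main

    import (
        "fmt"
        "net/url"
    )

    // Mutating a dereferenced copy leaves the original *url.URL intact.
    func main() {
        endpoint, _ := url.Parse("https://s3.amazonaws.com")

        target := *endpoint            // value copy, as in targetURL := *c.endpointURL
        target.Path = "/mybucket/"     // per-request mutation
        fmt.Println(target.String())   // https://s3.amazonaws.com/mybucket/
        fmt.Println(endpoint.String()) // shared endpoint is unchanged
    }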

View file

@ -28,7 +28,8 @@ import (
"sort" "sort"
) )
// getUploadID if already present for object name or initiate a request to fetch a new upload id. // getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) { func (c Client) getUploadID(bucketName, objectName, contentType string) (string, error) {
// Input validation. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
@ -60,84 +61,16 @@ func (c Client) getUploadID(bucketName, objectName, contentType string) (string,
return uploadID, nil return uploadID, nil
} }
// FPutObject - put object a file. // computeHash - Calculates MD5 and SHA256 for an input read Seeker.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Open the referenced file.
fileData, err := os.Open(filePath)
// If any error fail quickly here.
if err != nil {
return 0, err
}
defer fileData.Close()
// Save the file stat.
fileStat, err := fileData.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
if fileSize > int64(maxMultipartPutObjectSize) {
return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.")
}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if isGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size.
n, err := c.putNoChecksum(bucketName, objectName, fileData, fileSize, contentType)
return n, err
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size.
n, err := c.putAnonymous(bucketName, objectName, fileData, fileSize, contentType)
return n, err
}
// Small object upload is initiated for uploads for input data size smaller than 5MiB.
if fileSize < minimumPartSize {
return c.putSmallObject(bucketName, objectName, fileData, fileSize, contentType)
}
return c.fputLargeObject(bucketName, objectName, fileData, fileSize, contentType)
}
// computeHash - calculates MD5 and Sha256 for an input read Seeker.
func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) { func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and Sha256 hasher. // MD5 and SHA256 hasher.
var hashMD5, hashSha256 hash.Hash var hashMD5, hashSHA256 hash.Hash
// MD5 and Sha256 hasher. // MD5 and SHA256 hasher.
hashMD5 = md5.New() hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5) hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() { if c.signature.isV4() {
hashSha256 = sha256.New() hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSha256) hashWriter = io.MultiWriter(hashMD5, hashSHA256)
} }
size, err = io.Copy(hashWriter, reader) size, err = io.Copy(hashWriter, reader)
@ -153,12 +86,13 @@ func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, siz
// Finalize md5sum and sha256 sum. // Finalize md5sum and sha256 sum.
md5Sum = hashMD5.Sum(nil) md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() { if c.signature.isV4() {
sha256Sum = hashSha256.Sum(nil) sha256Sum = hashSHA256.Sum(nil)
} }
return md5Sum, sha256Sum, size, nil return md5Sum, sha256Sum, size, nil
} }
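The MultiWriter pattern above can be exercised in isolation. The following is a minimal, self-contained sketch (not part of this commit) that computes both digests in a single pass over a reader, mirroring what computeHash does when V4 signing requires a SHA256 sum:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	reader := strings.NewReader("example payload")
	hashMD5 := md5.New()
	hashSHA256 := sha256.New()
	// A single copy feeds both hashers at once.
	size, err := io.Copy(io.MultiWriter(hashMD5, hashSHA256), reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(size)
	fmt.Println(hex.EncodeToString(hashMD5.Sum(nil)))
	fmt.Println(hex.EncodeToString(hashSHA256.Sum(nil)))
}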
func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File, fileSize int64, contentType string) (int64, error) { // FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -167,27 +101,119 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File
return 0, err return 0, err
} }
// getUploadID for an object, initiates a new multipart request // Open the referenced file.
fileReader, err := os.Open(filePath)
// If any error fail quickly here.
if err != nil {
return 0, err
}
defer fileReader.Close()
// Save the file stat.
fileStat, err := fileReader.Stat()
if err != nil {
return 0, err
}
// Save the file size.
fileSize := fileStat.Size()
// Check for largest object size allowed.
if fileSize > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(fileSize, bucketName, objectName)
}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if isGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType)
}
// Small object upload is initiated for uploads for input data size smaller than 5MiB.
if fileSize < minimumPartSize {
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType)
}
// Upload all large objects as multipart.
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "NotImplemented" {
// If size of file is greater than '5GiB' fail.
if fileSize > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(fileSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType)
}
return n, err
}
return n, nil
}
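A caller-side sketch of the reworked FPutObject. Bucket, object, and file names are made up, and the constructor call follows the pattern used by the examples at this revision; treat its exact signature as an assumption if building against a different version:

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// NOTE: constructor signature assumed from the examples/ directory.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// FPutObject picks single vs multipart upload based on the file size
	// and falls back to a single PUT when multipart is not implemented.
	n, err := c.FPutObject("mybucket", "photo.jpg", "/tmp/photo.jpg", "image/jpeg")
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)
}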
// putObjectMultipartFromFile - Creates object from contents of *os.File
//
// NOTE: This function is meant to be used for readers backed by a local
// file, as in *os.File. It resumes an upload by skipping parts which were
// already uploaded, verifying them against the MD5 sum of each individual
// part. It also makes effective use of the file system's ability to read
// from specific sections, so no temporary files need to be created.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader *os.File, fileSize int64, contentType string) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object. // if it cannot find any previously partially uploaded object.
uploadID, err := c.getUploadID(bucketName, objectName, contentType) uploadID, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil { if err != nil {
return 0, err return 0, err
} }
// total data read and written to server. should be equal to 'size' at the end of the call. // Total data read and written to server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64 var totalUploadedSize int64
// Complete multipart upload. // Complete multipart upload.
var completeMultipartUpload completeMultipartUpload var completeMultipartUpload completeMultipartUpload
// Fetch previously upload parts and save the total size. // Fetch previously uploaded parts.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Previous maximum part size // Previous maximum part size
var prevMaxPartSize int64 var prevMaxPartSize int64
// Loop through all parts and calculate totalUploadedSize. // Loop through all parts and fetch prevMaxPartSize.
for _, partInfo := range partsInfo { for _, partInfo := range partsInfo {
// Choose the maximum part size. // Choose the maximum part size.
if partInfo.Size >= prevMaxPartSize { if partInfo.Size >= prevMaxPartSize {
@ -197,7 +223,7 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File
// Calculate the optimal part size for a given file size. // Calculate the optimal part size for a given file size.
partSize := optimalPartSize(fileSize) partSize := optimalPartSize(fileSize)
// If prevMaxPartSize is set use that. // Use prevMaxPartSize if available.
if prevMaxPartSize != 0 { if prevMaxPartSize != 0 {
partSize = prevMaxPartSize partSize = prevMaxPartSize
} }
@ -205,52 +231,39 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File
// Part number always starts with '0'. // Part number always starts with '0'.
partNumber := 0 partNumber := 0
// Loop through until EOF. // Upload each part until fileSize.
for totalUploadedSize < fileSize { for totalUploadedSize < fileSize {
// Increment part number. // Increment part number.
partNumber++ partNumber++
// Get a section reader on a particular offset. // Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileData, totalUploadedSize, partSize) sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
// Calculates MD5 and Sha256 sum for a section reader. // Calculates MD5 and SHA256 sum for a section reader.
md5Sum, sha256Sum, size, err := c.computeHash(sectionReader) md5Sum, sha256Sum, size, err := c.computeHash(sectionReader)
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Save all the part metadata. // Verify if part was not uploaded.
prtData := partData{
ReadCloser: ioutil.NopCloser(sectionReader),
Size: size,
MD5Sum: md5Sum,
Sha256Sum: sha256Sum,
Number: partNumber, // Part number to be uploaded.
}
// If part not uploaded proceed to upload.
if !isPartUploaded(objectPart{ if !isPartUploaded(objectPart{
ETag: hex.EncodeToString(prtData.MD5Sum), ETag: hex.EncodeToString(md5Sum),
PartNumber: prtData.Number, PartNumber: partNumber,
}, partsInfo) { }, partsInfo) {
// Upload the part. // Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData) objPart, err := c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(sectionReader), partNumber, md5Sum, sha256Sum, size)
if err != nil { if err != nil {
prtData.ReadCloser.Close()
return totalUploadedSize, err return totalUploadedSize, err
} }
// Save successfully uploaded part metadata. // Save successfully uploaded part metadata.
partsInfo[prtData.Number] = objPart partsInfo[partNumber] = objPart
} }
// Close the read closer for temporary file.
prtData.ReadCloser.Close()
// Save successfully uploaded size. // Save successfully uploaded size.
totalUploadedSize += prtData.Size totalUploadedSize += size
} }
// if totalUploadedSize is different than the file 'size'. Do not complete the request throw an error. // Verify if we uploaded all data.
if totalUploadedSize != fileSize { if totalUploadedSize != fileSize {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
} }
@ -263,7 +276,7 @@ func (c Client) fputLargeObject(bucketName, objectName string, fileData *os.File
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
} }
// If partNumber is different than total list of parts, error out. // Verify if partNumber is different than total list of parts.
if partNumber != len(completeMultipartUpload.Parts) { if partNumber != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
} }
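The io.NewSectionReader call above is what lets a resumed upload re-read any part directly from the file. A stand-alone sketch of reading one part at an offset (file path and sizes are hypothetical):

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("/tmp/bigfile") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	const partSize = 5 * 1024 * 1024 // 5MiB parts
	var partNumber int64 = 3
	// Read only the third part, without touching the rest of the file.
	section := io.NewSectionReader(f, (partNumber-1)*partSize, partSize)
	h := md5.New()
	n, err := io.Copy(h, section)
	if err != nil {
		panic(err)
	}
	fmt.Printf("part %d: %d bytes, md5 %x\n", partNumber, n, h.Sum(nil))
}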

View file

@ -0,0 +1,421 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"hash"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
)
// Verify if reader is *os.File
func isFile(reader io.Reader) (ok bool) {
_, ok = reader.(*os.File)
return
}
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
return
}
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
return
}
// hashCopyN - Calculates MD5 and SHA256 sums for up to partSize bytes.
func (c Client) hashCopyN(writer io.ReadWriteSeeker, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// MD5 and SHA256 hasher.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
}
// Copies to input at writer.
size, err = io.CopyN(hashWriter, reader, partSize)
if err != nil {
// If not EOF return error right here.
if err != io.EOF {
return nil, nil, 0, err
}
}
// Seek back to the beginning of the input; fail right here on any error.
if _, err := writer.Seek(0, 0); err != nil {
return nil, nil, 0, err
}
// Finalize md5sum and sha256 sum.
md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
return md5Sum, sha256Sum, size, err
}
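One subtlety worth noting: io.CopyN reports io.EOF when the source runs out before partSize bytes, and hashCopyN deliberately passes that through so the caller can treat a short read as the final part rather than a failure. A minimal illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	var dst bytes.Buffer
	src := strings.NewReader("short") // only 5 bytes available
	n, err := io.CopyN(&dst, src, 1024)
	fmt.Println(n, err) // 5 io.EOF: a short final part, not a failure
}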
// Comprehensive put object operation involving multipart resumable uploads.
//
// Following code handles these types of readers.
//
// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
// If none of the known types match, the code falls back to using the
// stream as-is, where each part is staged to a temporary file,
// checksummed and verified before upload.
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
if size > 0 && size >= minimumPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
// while it is going to be duck typed similar to io.ReaderAt.
// It is to indicate that *minio.Object implements io.ReaderAt.
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType)
}
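The dispatch can be restated compactly. The sketch below re-implements the predicates outside the library for illustration only (it folds the *minio.Object case into the io.ReaderAt check, as the comment above says is safe) and shows which upload path common reader types take:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
)

// pathFor mirrors the isFile/isReadAt dispatch in putObjectMultipart.
func pathFor(r io.Reader) string {
	if _, ok := r.(*os.File); ok {
		return "putObjectMultipartFromFile"
	}
	if _, ok := r.(io.ReaderAt); ok {
		return "putObjectMultipartFromReadAt"
	}
	return "putObjectMultipartStream"
}

func main() {
	fmt.Println(pathFor(os.Stdin))                  // *os.File -> file path
	fmt.Println(pathFor(bytes.NewReader(nil)))      // io.ReaderAt -> ReadAt path
	fmt.Println(pathFor(bufio.NewReader(os.Stdin))) // plain stream path
}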
// putObjectMultipartStream uploads files bigger than 5MiB, and also
// supports the special case where the size is unknown, i.e. '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// getUploadID for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// Total data read and written to server; should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Fetch previously uploaded parts.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
// Previous maximum part size
var prevMaxPartSize int64
// Loop through all parts and fetch prevMaxPartSize.
for _, partInfo := range partsInfo {
// Choose the maximum part size.
if partInfo.Size >= prevMaxPartSize {
prevMaxPartSize = partInfo.Size
}
}
// Calculate the optimal part size for a given size.
partSize := optimalPartSize(size)
// Use prevMaxPartSize if available.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// Part number always starts with '0'.
partNumber := 0
// Upload each part until EOF.
for {
// Increment part number.
partNumber++
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject-stream")
if err != nil {
return 0, err
}
// Calculates MD5 and SHA256 sum while copying partSize bytes into tmpFile.
md5Sum, sha256Sum, size, rErr := c.hashCopyN(tmpFile, reader, partSize)
if rErr != nil {
if rErr != io.EOF {
return 0, rErr
}
}
// Verify if part was not uploaded.
if !isPartUploaded(objectPart{
ETag: hex.EncodeToString(md5Sum),
PartNumber: partNumber,
}, partsInfo) {
// Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, size)
if err != nil {
// Close the temporary file upon any error.
tmpFile.Close()
return 0, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
}
// Close the temporary file.
tmpFile.Close()
// If read error was an EOF, break out of the loop.
if rErr == io.EOF {
break
}
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
// Save successfully uploaded size.
totalUploadedSize += part.Size
}
// Verify if we uploaded all the data. This check must come after the
// loop above, which is where totalUploadedSize is accumulated.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// Verify if partNumber is different than total list of parts.
if partNumber != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
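For the generic stream path, each part is staged in a temporary file before upload. A rough stdlib equivalent of that staging step (newTempFile is a library helper; this sketch substitutes ioutil.TempFile with manual cleanup):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

func main() {
	src := strings.NewReader("pretend this is a large stream")
	const partSize = 16
	tmp, err := ioutil.TempFile("", "multiparts-")
	if err != nil {
		log.Fatalln(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	// Stage up to one part's worth of the stream on disk.
	if _, err := io.CopyN(tmp, src, partSize); err != nil && err != io.EOF {
		log.Fatalln(err)
	}
	// Rewind so the staged part can be re-read for the upload request.
	if _, err := tmp.Seek(0, 0); err != nil {
		log.Fatalln(err)
	}
}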
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if contentType == "" {
contentType = "application/octet-stream"
}
// Set ContentType header.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return initiateMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode xml for new multipart upload.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
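The three multipart requests differ mainly in their query strings: 'uploads' to initiate, 'partNumber' plus 'uploadId' per part, and 'uploadId' to complete. A quick stand-alone sketch of the URL shapes url.Values produces (upload id is a made-up placeholder):

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	initiate := make(url.Values)
	initiate.Set("uploads", "")
	fmt.Println(initiate.Encode()) // "uploads="

	part := make(url.Values)
	part.Set("partNumber", strconv.Itoa(1))
	part.Set("uploadId", "example-upload-id")
	fmt.Println(part.Encode()) // "partNumber=1&uploadId=example-upload-id"
}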
// uploadPart - Uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return objectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
return objectPart{}, err
}
if size > maxPartSize {
return objectPart{}, ErrEntityTooLarge(size, bucketName, objectName)
}
if size <= -1 {
return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if partNumber <= 0 {
return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
}
if uploadID == "" {
return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(partNumber))
// Set upload id.
urlValues.Set("uploadId", uploadID)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
contentSHA256Bytes: sha256Sum,
}
// Instantiate a request.
req, err := c.newRequest("PUT", reqMetadata)
if err != nil {
return objectPart{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return objectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
objPart := objectPart{}
objPart.Size = size
objPart.PartNumber = partNumber
// Trim off the odd double quotes from ETag in the beginning and end.
objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
return objPart, nil
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Instantiate the complete multipart upload body buffer.
completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
contentLength: int64(completeMultipartUploadBuffer.Len()),
contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode completed multipart upload response on success.
completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
if err != nil {
return completeMultipartUploadResult, err
}
return completeMultipartUploadResult, nil
}
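completeMultipartUpload depends on the parts being sorted by part number before marshalling, since S3 expects ascending order in the CompleteMultipartUpload body. A stand-alone sketch of the sort.Interface involved (types renamed; the library's completedParts works the same way):

package main

import (
	"fmt"
	"sort"
)

// Stand-ins for the library's completePart/completedParts types.
type part struct{ PartNumber int }
type byNumber []part

func (a byNumber) Len() int           { return len(a) }
func (a byNumber) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }

func main() {
	parts := []part{{3}, {1}, {2}}
	sort.Sort(byNumber(parts))
	fmt.Println(parts) // [{1} {2} {3}]
}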

View file

@ -1,378 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"sort"
)
// PutObjectPartial put object partial.
func (c Client) PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Input size negative should return error.
if size < 0 {
return 0, ErrInvalidArgument("Input file size cannot be negative.")
}
// Input size bigger than 5TiB should fail.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrInvalidArgument("Input file size is bigger than the supported maximum of 5TiB.")
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
if isGoogleEndpoint(c.endpointURL) {
if size > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", size),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads upto 5GiB in size.
n, err := c.putPartialNoChksum(bucketName, objectName, data, size, contentType)
return n, err
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if size > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size),
Key: objectName,
BucketName: bucketName,
}
}
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size.
n, err := c.putPartialAnonymous(bucketName, objectName, data, size, contentType)
return n, err
}
// Small file upload is initiated for uploads for input data size smaller than 5MiB.
if size < minimumPartSize {
n, err = c.putPartialSmallObject(bucketName, objectName, data, size, contentType)
return n, err
}
n, err = c.putPartialLargeObject(bucketName, objectName, data, size, contentType)
return n, err
}
// putNoChecksumPartial special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putPartialNoChksum(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxPartSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Create a new pipe to stage the reads.
reader, writer := io.Pipe()
// readAtOffset to carry future offsets.
var readAtOffset int64
// readAt defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, 1024*1024*5)
// Initiate a routine to start writing.
go func() {
for {
readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
writer.CloseWithError(rerr)
return
}
}
writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
if werr != nil {
writer.CloseWithError(werr)
return
}
if readAtSize != writeSize {
writer.CloseWithError(errors.New("Something really bad happened here. " + reportIssue))
return
}
readAtOffset += int64(writeSize)
if rerr == io.EOF {
writer.Close()
return
}
}
}()
// For anonymous requests, we will not calculate sha256 and md5sum.
putObjData := putObjectData{
MD5Sum: nil,
Sha256Sum: nil,
ReadCloser: reader,
Size: size,
ContentType: contentType,
}
// Execute put object.
st, err := c.putObject(bucketName, objectName, putObjData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putAnonymousPartial is a special function for uploading content as anonymous request.
// This special function is necessary since Amazon S3 doesn't allow anonymous multipart uploads.
func (c Client) putPartialAnonymous(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
return c.putPartialNoChksum(bucketName, objectName, data, size, contentType)
}
// putSmallObjectPartial uploads files smaller than 5MiB.
func (c Client) putPartialSmallObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// readAt defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, size)
readAtSize, err := data.ReadAt(readAtBuffer, 0)
if err != nil {
if err != io.EOF {
return 0, err
}
}
if int64(readAtSize) != size {
return 0, ErrUnexpectedEOF(int64(readAtSize), size, bucketName, objectName)
}
// Construct a new PUT object metadata.
putObjData := putObjectData{
MD5Sum: sumMD5(readAtBuffer),
Sha256Sum: sum256(readAtBuffer),
ReadCloser: ioutil.NopCloser(bytes.NewReader(readAtBuffer)),
Size: size,
ContentType: contentType,
}
// Single part use case, use putObject directly.
st, err := c.putObject(bucketName, objectName, putObjData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putPartialLargeObject uploads files bigger than 5MiB.
func (c Client) putPartialLargeObject(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// getUploadID for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Fetch previously upload parts and save the total size.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
// Previous maximum part size
var prevMaxPartSize int64
// previous part number.
var prevPartNumber int
// Loop through all parts and calculate totalUploadedSize.
for _, partInfo := range partsInfo {
totalUploadedSize += partInfo.Size
// Choose the maximum part size.
if partInfo.Size >= prevMaxPartSize {
prevMaxPartSize = partInfo.Size
}
// Save previous part number.
prevPartNumber = partInfo.PartNumber
}
// Calculate the optimal part size for a given file size.
partSize := optimalPartSize(size)
// If prevMaxPartSize is set use that.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// MD5 and Sha256 hasher.
var hashMD5, hashSha256 hash.Hash
// Part number always starts with prevPartNumber + 1. i.e The next part number.
partNumber := prevPartNumber + 1
// Loop through until EOF.
for totalUploadedSize < size {
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject-partial")
if err != nil {
return 0, err
}
// Create a hash multiwriter.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() {
hashSha256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSha256)
}
writer := io.MultiWriter(tmpFile, hashWriter)
// totalUploadedSize is the current readAtOffset.
readAtOffset := totalUploadedSize
// Read until partSize.
var totalReadPartSize int64
// readAt defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, optimalReadAtBufferSize)
// Loop through until partSize.
for totalReadPartSize < partSize {
readAtSize, rerr := data.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
return 0, rerr
}
}
writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
if werr != nil {
return 0, werr
}
if readAtSize != writeSize {
return 0, errors.New("Something really bad happened here. " + reportIssue)
}
readAtOffset += int64(writeSize)
totalReadPartSize += int64(writeSize)
if rerr == io.EOF {
break
}
}
// Seek back to beginning of the temporary file.
if _, err := tmpFile.Seek(0, 0); err != nil {
return 0, err
}
// Save all the part metadata.
prtData := partData{
ReadCloser: tmpFile,
MD5Sum: hashMD5.Sum(nil),
Size: totalReadPartSize,
}
// Signature version '4'.
if c.signature.isV4() {
prtData.Sha256Sum = hashSha256.Sum(nil)
}
// Current part number to be uploaded.
prtData.Number = partNumber
// execute upload part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData)
if err != nil {
// Close the read closer.
prtData.ReadCloser.Close()
return totalUploadedSize, err
}
// Save successfully uploaded size.
totalUploadedSize += prtData.Size
// Save successfully uploaded part metadata.
partsInfo[prtData.Number] = objPart
// Move to next part.
partNumber++
}
// If size is greater than zero verify totalUploaded.
// if totalUploaded is different than the input 'size', do not complete the request throw an error.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}

View file

@ -0,0 +1,196 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/md5"
"crypto/sha256"
"errors"
"hash"
"io"
"sort"
)
// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports
// any reader which implements the io.ReaderAt interface (ReadAt method).
//
// NOTE: This function is meant to be used for all readers which implement
// io.ReaderAt, which allows resuming multipart uploads by reading at an
// offset and thereby avoids re-reading data which was already uploaded.
// Internally this function uses temporary files for staging all the data;
// these temporary files are cleaned up automatically when the caller, i.e.
// the http client, closes the stream after uploading all the contents
// successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Fetch previously upload parts.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
// Previous maximum part size
var prevMaxPartSize int64
// Previous part number.
var prevPartNumber int
// Loop through all parts and calculate totalUploadedSize.
for _, partInfo := range partsInfo {
totalUploadedSize += partInfo.Size
// Choose the maximum part size.
if partInfo.Size >= prevMaxPartSize {
prevMaxPartSize = partInfo.Size
}
// Save previous part number.
prevPartNumber = partInfo.PartNumber
}
// Calculate the optimal part size for a given file size.
partSize := optimalPartSize(size)
// If prevMaxPartSize is set use that.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// MD5 and SHA256 hasher.
var hashMD5, hashSHA256 hash.Hash
// Part number always starts with prevPartNumber + 1. i.e The next part number.
partNumber := prevPartNumber + 1
// Upload each part until totalUploadedSize reaches input reader size.
for totalUploadedSize < size {
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject-partial")
if err != nil {
return 0, err
}
// Create a hash multiwriter.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(hashMD5)
if c.signature.isV4() {
hashSHA256 = sha256.New()
hashWriter = io.MultiWriter(hashMD5, hashSHA256)
}
writer := io.MultiWriter(tmpFile, hashWriter)
// Choose totalUploadedSize as the current readAtOffset.
readAtOffset := totalUploadedSize
// Read until partSize.
var totalReadPartSize int64
// ReadAt defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, optimalReadAtBufferSize)
// The following block reads data at an offset from the input
// reader and copies it into a local temporary file.
// Temporary file data is limited to the partSize.
for totalReadPartSize < partSize {
readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
return 0, rerr
}
}
writeSize, werr := writer.Write(readAtBuffer[:readAtSize])
if werr != nil {
return 0, werr
}
if readAtSize != writeSize {
return 0, errors.New("Something really bad happened here. " + reportIssue)
}
readAtOffset += int64(writeSize)
totalReadPartSize += int64(writeSize)
if rerr == io.EOF {
break
}
}
// Seek back to beginning of the temporary file.
if _, err := tmpFile.Seek(0, 0); err != nil {
return 0, err
}
var md5Sum, sha256Sum []byte
md5Sum = hashMD5.Sum(nil)
// Signature version '4'.
if c.signature.isV4() {
sha256Sum = hashSHA256.Sum(nil)
}
// Proceed to upload the part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, tmpFile, partNumber, md5Sum, sha256Sum, totalReadPartSize)
if err != nil {
// Close the read closer.
tmpFile.Close()
return totalUploadedSize, err
}
// Save successfully uploaded size.
totalUploadedSize += totalReadPartSize
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Move to next part.
partNumber++
}
// Verify if we uploaded all the data.
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}

View file

@ -18,21 +18,43 @@ package minio
import ( import (
"bytes" "bytes"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url" "os"
"sort"
"strconv"
"strings" "strings"
) )
// getReaderSize gets the size of the underlying reader, if possible.
func getReaderSize(reader io.Reader) (size int64, err error) {
size = -1
if reader != nil {
switch v := reader.(type) {
case *bytes.Buffer:
size = int64(v.Len())
case *bytes.Reader:
size = int64(v.Len())
case *strings.Reader:
size = int64(v.Len())
case *os.File:
var st os.FileInfo
st, err = v.Stat()
if err != nil {
return 0, err
}
size = st.Size()
case *Object:
var st ObjectInfo
st, err = v.Stat()
if err != nil {
return 0, err
}
size = st.Size
}
}
return size, nil
}
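getReaderSize is the reason PutObject no longer takes an explicit size argument. A condensed re-statement for illustration only (the real function also handles *strings.Reader and *minio.Object):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
)

// readerSize mirrors getReaderSize's type switch, abbreviated.
func readerSize(r io.Reader) int64 {
	switch v := r.(type) {
	case *bytes.Buffer:
		return int64(v.Len())
	case *bytes.Reader:
		return int64(v.Len())
	case *os.File:
		if st, err := v.Stat(); err == nil {
			return st.Size()
		}
	}
	return -1 // unknown size: PutObject falls back to streaming multipart
}

func main() {
	fmt.Println(readerSize(bytes.NewReader(make([]byte, 42)))) // 42
	fmt.Println(readerSize(bufio.NewReader(os.Stdin)))         // -1
}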
// completedParts is a collection of parts sortable by their part numbers. // completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request. // used for sorting the uploaded parts before completing the multipart request.
type completedParts []completePart type completedParts []completePart
@ -54,7 +76,7 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// So we fall back to single PUT operation with the maximum limit of 5GiB. // So we fall back to single PUT operation with the maximum limit of 5GiB.
// //
// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. // NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
func (c Client) PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) { func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
// Input validation. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return 0, err return 0, err
@ -63,6 +85,17 @@ func (c Client) PutObject(bucketName, objectName string, data io.Reader, size in
return 0, err return 0, err
} }
// Get the reader size.
size, err := getReaderSize(reader)
if err != nil {
return 0, err
}
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB. // So we fall back to single PUT operation with the maximum limit of 5GiB.
if isGoogleEndpoint(c.endpointURL) { if isGoogleEndpoint(c.endpointURL) {
@ -74,55 +107,108 @@ func (c Client) PutObject(bucketName, objectName string, data io.Reader, size in
BucketName: bucketName, BucketName: bucketName,
} }
} }
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putNoChecksum(bucketName, objectName, data, size, contentType) return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
} }
// NOTE: S3 doesn't allow anonymous multipart requests. // NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous { if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 || size > int64(maxSinglePutObjectSize) { if size <= -1 {
return 0, ErrorResponse{ return 0, ErrorResponse{
Code: "NotImplemented", Code: "NotImplemented",
Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", size), Message: "Content-Length cannot be negative for anonymous requests.",
Key: objectName, Key: objectName,
BucketName: bucketName, BucketName: bucketName,
} }
} }
// Do not compute MD5 for anonymous requests to Amazon S3. Uploads upto 5GiB in size. if size > maxSinglePutObjectSize {
return c.putAnonymous(bucketName, objectName, data, size, contentType)
}
// Large file upload is initiated for uploads for input data size
// if its greater than 5MiB or data size is negative.
if size >= minimumPartSize || size < 0 {
return c.putLargeObject(bucketName, objectName, data, size, contentType)
}
return c.putSmallObject(bucketName, objectName, data, size, contentType)
}
// putNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxPartSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName) return 0, ErrEntityTooLarge(size, bucketName, objectName)
} }
// For anonymous requests, we will not calculate sha256 and md5sum. // Do not compute MD5 for anonymous requests to Amazon S3. Uploads up to 5GiB in size.
putObjData := putObjectData{ return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType)
MD5Sum: nil, }
Sha256Sum: nil,
ReadCloser: ioutil.NopCloser(data), // putSmall object.
Size: size, if size < minimumPartSize && size > 0 {
ContentType: contentType, return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
}
// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
if errResp.Code == "NotImplemented" {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, contentType)
}
return n, err
}
return n, nil
}
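For callers the visible change is the dropped size argument: PutObject now infers the size from the reader and streams via multipart when it cannot. A hedged sketch, assuming c is an already constructed client and reader is any io.Reader (as in the FPutObject sketch earlier):

// The size argument is gone; compare the old call shape:
//   n, err := c.PutObject(bucketName, objectName, reader, size, contentType)
n, err := c.PutObject("mybucket", "myobject", reader, "application/octet-stream")
if err != nil {
	log.Fatalln(err)
}
log.Printf("uploaded %d bytes", n)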
// putObjectNoChecksum - a special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// putObjectSingle is a special function for uploading a single put object request.
// This special function is used as a fallback when multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, bucketName, objectName)
}
// If size is unknown, i.e. a stream, upload up to 5GiB.
if size <= -1 {
size = maxSinglePutObjectSize
}
// Initialize a new temporary file.
tmpFile, err := newTempFile("single$-putobject-single")
if err != nil {
return 0, err
}
md5Sum, sha256Sum, size, err := c.hashCopyN(tmpFile, reader, size)
if err != nil {
if err != io.EOF {
return 0, err
}
} }
// Execute put object. // Execute put object.
st, err := c.putObject(bucketName, objectName, putObjData) st, err := c.putObjectDo(bucketName, objectName, tmpFile, md5Sum, sha256Sum, size, contentType)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -132,442 +218,67 @@ func (c Client) putNoChecksum(bucketName, objectName string, data io.Reader, siz
return size, nil return size, nil
} }
// putAnonymous is a special function for uploading content as anonymous request. // putObjectDo - executes the put object http operation.
// This special function is necessary since Amazon S3 doesn't allow anonymous
// multipart uploads.
func (c Client) putAnonymous(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
return c.putNoChecksum(bucketName, objectName, data, size, contentType)
}
// putSmallObject uploads files smaller than 5 mega bytes.
func (c Client) putSmallObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// Read input data fully into buffer.
dataBytes, err := ioutil.ReadAll(data)
if err != nil {
return 0, err
}
if int64(len(dataBytes)) != size {
return 0, ErrUnexpectedEOF(int64(len(dataBytes)), size, bucketName, objectName)
}
// Construct a new PUT object metadata.
putObjData := putObjectData{
MD5Sum: sumMD5(dataBytes),
Sha256Sum: sum256(dataBytes),
ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
Size: size,
ContentType: contentType,
}
// Single part use case, use putObject directly.
st, err := c.putObject(bucketName, objectName, putObjData)
if err != nil {
return 0, err
}
if st.Size != size {
return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
}
return size, nil
}
// hashCopy - calculates Md5sum and Sha256sum for upto partSize amount of bytes.
func (c Client) hashCopy(writer io.ReadWriteSeeker, data io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
// MD5 and Sha256 hasher.
var hashMD5, hashSha256 hash.Hash
// MD5 and Sha256 hasher.
hashMD5 = md5.New()
hashWriter := io.MultiWriter(writer, hashMD5)
if c.signature.isV4() {
hashSha256 = sha256.New()
hashWriter = io.MultiWriter(writer, hashMD5, hashSha256)
}
// Copies to input at writer.
size, err = io.CopyN(hashWriter, data, partSize)
if err != nil {
// If not EOF return error right here.
if err != io.EOF {
return nil, nil, 0, err
}
}
// Seek back to beginning of input, any error fail right here.
if _, err := writer.Seek(0, 0); err != nil {
return nil, nil, 0, err
}
// Finalize md5shum and sha256 sum.
md5Sum = hashMD5.Sum(nil)
if c.signature.isV4() {
sha256Sum = hashSha256.Sum(nil)
}
return md5Sum, sha256Sum, size, err
}
// putLargeObject uploads files bigger than 5 mega bytes.
func (c Client) putLargeObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
}
if err := isValidObjectName(objectName); err != nil {
return 0, err
}
// getUploadID for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, err := c.getUploadID(bucketName, objectName, contentType)
if err != nil {
return 0, err
}
// total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// Fetch previously upload parts and save the total size.
partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
// Previous maximum part size
var prevMaxPartSize int64
// Loop through all parts and calculate totalUploadedSize.
for _, partInfo := range partsInfo {
// Choose the maximum part size.
if partInfo.Size >= prevMaxPartSize {
prevMaxPartSize = partInfo.Size
}
}
// Calculate the optimal part size for a given size.
partSize := optimalPartSize(size)
// If prevMaxPartSize is set use that.
if prevMaxPartSize != 0 {
partSize = prevMaxPartSize
}
// Part number always starts with '0'.
partNumber := 0
// Loop through until EOF.
for {
// Increment part number.
partNumber++
// Initialize a new temporary file.
tmpFile, err := newTempFile("multiparts$-putobject")
if err != nil {
return 0, err
}
// Calculates MD5 and Sha256 sum while copying partSize bytes into tmpFile.
md5Sum, sha256Sum, size, rErr := c.hashCopy(tmpFile, data, partSize)
if rErr != nil {
if rErr != io.EOF {
return 0, rErr
}
}
// Save all the part metadata.
prtData := partData{
ReadCloser: tmpFile,
Size: size,
MD5Sum: md5Sum,
Sha256Sum: sha256Sum,
Number: partNumber, // Current part number to be uploaded.
}
// If part not uploaded proceed to upload.
if !isPartUploaded(objectPart{
ETag: hex.EncodeToString(prtData.MD5Sum),
PartNumber: partNumber,
}, partsInfo) {
// execute upload part.
objPart, err := c.uploadPart(bucketName, objectName, uploadID, prtData)
if err != nil {
// Close the read closer.
prtData.ReadCloser.Close()
return 0, err
}
// Save successfully uploaded part metadata.
partsInfo[prtData.Number] = objPart
}
// Close the read closer.
prtData.ReadCloser.Close()
// If read error was an EOF, break out of the loop.
if rErr == io.EOF {
break
}
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
// Save successfully uploaded size.
totalUploadedSize += part.Size
}
// If size is greater than zero verify totalUploadedSize. if totalUploadedSize is
// different than the input 'size', do not complete the request throw an error.
if size > 0 {
if totalUploadedSize != size {
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
}
// If partNumber is different than total list of parts, error out.
if partNumber != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
// Return final size.
return totalUploadedSize, nil
}
// putObject - add an object to a bucket.
// NOTE: You must have WRITE permissions on a bucket to add an object to it. // NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObject(bucketName, objectName string, putObjData putObjectData) (ObjectStat, error) { func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
// Input validation. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
if strings.TrimSpace(putObjData.ContentType) == "" { if size <= -1 {
putObjData.ContentType = "application/octet-stream" return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if size > maxSinglePutObjectSize {
return ObjectInfo{}, ErrEntityTooLarge(size, bucketName, objectName)
}
if strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
} }
// Set headers. // Set headers.
customHeader := make(http.Header) customHeader := make(http.Header)
customHeader.Set("Content-Type", putObjData.ContentType) customHeader.Set("Content-Type", contentType)
// Populate request metadata. // Populate request metadata.
reqMetadata := requestMetadata{ reqMetadata := requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
customHeader: customHeader, customHeader: customHeader,
contentBody: putObjData.ReadCloser, contentBody: reader,
contentLength: putObjData.Size, contentLength: size,
contentSha256Bytes: putObjData.Sha256Sum, contentMD5Bytes: md5Sum,
contentMD5Bytes: putObjData.MD5Sum, contentSHA256Bytes: sha256Sum,
} }
// Initiate new request. // Initiate new request.
req, err := c.newRequest("PUT", reqMetadata) req, err := c.newRequest("PUT", reqMetadata)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
// Execute the request. // Execute the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
var metadata ObjectStat var metadata ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end. // Trim off the odd double quotes from ETag in the beginning and end.
metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"") metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
// A success here means data was written to server successfully. // A success here means data was written to server successfully.
metadata.Size = putObjData.Size metadata.Size = size
// Return here. // Return here.
return metadata, nil return metadata, nil
} }
// initiateMultipartUpload initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if contentType == "" {
contentType = "application/octet-stream"
}
// set ContentType header.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
customHeader: customHeader,
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return initiateMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return initiateMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode the initiate multipart upload XML response.
initiateMultipartUploadResult := initiateMultipartUploadResult{}
err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
if err != nil {
return initiateMultipartUploadResult, err
}
return initiateMultipartUploadResult, nil
}
// uploadPart uploads a part in a multipart upload.
func (c Client) uploadPart(bucketName, objectName, uploadID string, uploadingPart partData) (objectPart, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return objectPart{}, err
}
if err := isValidObjectName(objectName); err != nil {
return objectPart{}, err
}
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number.
urlValues.Set("partNumber", strconv.Itoa(uploadingPart.Number))
// Set upload id.
urlValues.Set("uploadId", uploadID)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: uploadingPart.ReadCloser,
contentLength: uploadingPart.Size,
contentSha256Bytes: uploadingPart.Sha256Sum,
contentMD5Bytes: uploadingPart.MD5Sum,
}
// Instantiate a request.
req, err := c.newRequest("PUT", reqMetadata)
if err != nil {
return objectPart{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return objectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return objectPart{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
objPart := objectPart{}
objPart.Size = uploadingPart.Size
objPart.PartNumber = uploadingPart.Number
// Trim off the odd double quotes from ETag in the beginning and end.
objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
return objPart, nil
}
// completeMultipartUpload completes a multipart upload by assembling previously uploaded parts.
func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
if err := isValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Instantiate the complete multipart upload body buffer.
completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
contentBody: ioutil.NopCloser(completeMultipartUploadBuffer),
contentLength: int64(completeMultipartUploadBuffer.Len()),
contentSha256Bytes: sum256(completeMultipartUploadBuffer.Bytes()),
}
// Instantiate the request.
req, err := c.newRequest("POST", reqMetadata)
if err != nil {
return completeMultipartUploadResult{}, err
}
// Execute the request.
resp, err := c.do(req)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
return completeMultipartUploadResult{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
}
}
// If successful response, decode the body.
completeMultipartUploadResult := completeMultipartUploadResult{}
err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
if err != nil {
return completeMultipartUploadResult, err
}
return completeMultipartUploadResult, nil
}
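Taken together, the three helpers above implement the multipart protocol end to end. A sketch of the happy path (resume logic and part splitting omitted; parts is assumed to be a pre-computed slice of partData values):

// Sketch: initiate -> upload each part -> complete.
res, err := c.initiateMultipartUpload(bucketName, objectName, "application/octet-stream")
if err != nil {
    return err
}
var complete completeMultipartUpload
for _, prt := range parts {
    objPart, err := c.uploadPart(bucketName, objectName, res.UploadID, prt)
    if err != nil {
        return err
    }
    complete.Parts = append(complete.Parts, completePart{
        PartNumber: objPart.PartNumber,
        ETag:       objPart.ETag,
    })
}
// S3 requires parts in ascending part number order.
sort.Sort(completedParts(complete.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, res.UploadID, complete)
return err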

View file

@ -26,15 +26,18 @@ import (
// All objects (including all object versions and delete markers) // All objects (including all object versions and delete markers)
// in the bucket must be deleted before successfully attempting this request. // in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error { func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
// Instantiate a new request.
req, err := c.newRequest("DELETE", requestMetadata{ req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
}) })
if err != nil { if err != nil {
return err return err
} }
// Initiate the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -54,12 +57,14 @@ func (c Client) RemoveBucket(bucketName string) error {
// RemoveObject removes an object from a bucket. // RemoveObject removes an object from a bucket.
func (c Client) RemoveObject(bucketName, objectName string) error { func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return err return err
} }
// Instantiate the request.
req, err := c.newRequest("DELETE", requestMetadata{ req, err := c.newRequest("DELETE", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
@ -67,6 +72,7 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err != nil { if err != nil {
return err return err
} }
// Initiate the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -81,42 +87,32 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
// RemoveIncompleteUpload aborts a partially uploaded object. // RemoveIncompleteUpload aborts a partially uploaded object.
// Requires explicit authentication, no anonymous requests are allowed for multipart API. // Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Validate input arguments. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return err return err
} }
errorCh := make(chan error) // Find multipart upload id of the object to be aborted.
go func(errorCh chan<- error) {
defer close(errorCh)
// Find multipart upload id of the object.
uploadID, err := c.findUploadID(bucketName, objectName) uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil { if err != nil {
errorCh <- err return err
return
} }
if uploadID != "" { if uploadID != "" {
// If uploadID is not an empty string, initiate the request. // Upload id found, abort the incomplete multipart upload.
err := c.abortMultipartUpload(bucketName, objectName, uploadID) err := c.abortMultipartUpload(bucketName, objectName, uploadID)
if err != nil { if err != nil {
errorCh <- err
return
}
return
}
}(errorCh)
err, ok := <-errorCh
if ok && err != nil {
return err return err
} }
}
return nil return nil
} }
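With the goroutine and error channel gone, RemoveIncompleteUpload is now a plain synchronous call; a minimal usage sketch (bucket and object names made up):

// Abort any dangling multipart upload left behind for this object.
if err := c.RemoveIncompleteUpload("mybucket", "myobject"); err != nil {
    log.Fatalln(err)
}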
// abortMultipartUpload aborts a multipart upload for the given uploadID, all parts are deleted. // abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error { func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Validate input arguments. // Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
@ -138,7 +134,7 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
return err return err
} }
// execute the request. // Initiate the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -146,11 +142,12 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusNoContent { if resp.StatusCode != http.StatusNoContent {
// Abort has no response body, handle it. // Abort has no response body, handle it for any errors.
var errorResponse ErrorResponse var errorResponse ErrorResponse
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusNotFound: case http.StatusNotFound:
// This is needed specifically for Abort and it cannot be converged. // This is needed specifically for abort and it cannot
// be converged into default case.
errorResponse = ErrorResponse{ errorResponse = ErrorResponse{
Code: "NoSuchUpload", Code: "NoSuchUpload",
Message: "The specified multipart upload does not exist.", Message: "The specified multipart upload does not exist.",

View file

@ -21,30 +21,33 @@ import (
"time" "time"
) )
// listAllMyBucketsResult container for listBuckets response // listAllMyBucketsResult container for listBuckets response.
type listAllMyBucketsResult struct { type listAllMyBucketsResult struct {
// Container for one or more buckets. // Container for one or more buckets.
Buckets struct { Buckets struct {
Bucket []BucketStat Bucket []BucketInfo
} }
Owner owner Owner owner
} }
// owner container for bucket owner information // owner container for bucket owner information.
type owner struct { type owner struct {
DisplayName string DisplayName string
ID string ID string
} }
// commonPrefix container for prefix response // commonPrefix container for prefix response.
type commonPrefix struct { type commonPrefix struct {
Prefix string Prefix string
} }
// listBucketResult container for listObjects response // listBucketResult container for listObjects response.
type listBucketResult struct { type listBucketResult struct {
CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you have specified a delimiter // A response can contain CommonPrefixes only if you have
Contents []ObjectStat // Metadata about each object returned // specified a delimiter.
CommonPrefixes []commonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string Delimiter string
// Encoding type used to encode object keys in the response. // Encoding type used to encode object keys in the response.
@ -57,13 +60,15 @@ type listBucketResult struct {
MaxKeys int64 MaxKeys int64
Name string Name string
// When response is truncated (the IsTruncated element value in the response // When response is truncated (the IsTruncated element value in
// is true), you can use the key name in this field as marker in the subsequent // the response is true), you can use the key name in this field
// request to get next set of objects. Object storage lists objects in alphabetical // as marker in the subsequent request to get next set of objects.
// order. Note: This element is returned only if you have delimiter request parameter // Object storage lists objects in alphabetical order. Note: This
// specified. If response does not include the NextMarker and it is truncated, // element is returned only if you have delimiter request
// you can use the value of the last Key in the response as the marker in the // parameter specified. If response does not include the NextMarker
// subsequent request to get the next set of object keys. // and it is truncated, you can use the value of the last Key in
// the response as the marker in the subsequent request to get the
// next set of object keys.
NextMarker string NextMarker string
Prefix string Prefix string
} }
@ -78,19 +83,20 @@ type listMultipartUploadsResult struct {
EncodingType string EncodingType string
MaxUploads int64 MaxUploads int64
IsTruncated bool IsTruncated bool
Uploads []ObjectMultipartStat `xml:"Upload"` Uploads []ObjectMultipartInfo `xml:"Upload"`
Prefix string Prefix string
Delimiter string Delimiter string
CommonPrefixes []commonPrefix // A response can contain CommonPrefixes only if you specify a delimiter // A response can contain CommonPrefixes only if you specify a delimiter.
CommonPrefixes []commonPrefix
} }
// initiator container for who initiated multipart upload // initiator container for who initiated multipart upload.
type initiator struct { type initiator struct {
ID string ID string
DisplayName string DisplayName string
} }
// objectPart container for particular part of an object // objectPart container for particular part of an object.
type objectPart struct { type objectPart struct {
// Part number identifies the part. // Part number identifies the part.
PartNumber int PartNumber int
@ -98,7 +104,8 @@ type objectPart struct {
// Date and time the part was uploaded. // Date and time the part was uploaded.
LastModified time.Time LastModified time.Time
// Entity tag returned when the part was uploaded, usually md5sum of the part // Entity tag returned when the part was uploaded, usually md5sum
// of the part.
ETag string ETag string
// Size of the uploaded part data. // Size of the uploaded part data.
@ -126,14 +133,16 @@ type listObjectPartsResult struct {
EncodingType string EncodingType string
} }
// initiateMultipartUploadResult container for InitiateMultiPartUpload response. // initiateMultipartUploadResult container for InitiateMultiPartUpload
// response.
type initiateMultipartUploadResult struct { type initiateMultipartUploadResult struct {
Bucket string Bucket string
Key string Key string
UploadID string `xml:"UploadId"` UploadID string `xml:"UploadId"`
} }
// completeMultipartUploadResult container for completed multipart upload response. // completeMultipartUploadResult container for completed multipart
// upload response.
type completeMultipartUploadResult struct { type completeMultipartUploadResult struct {
Location string Location string
Bucket string Bucket string
@ -141,7 +150,8 @@ type completeMultipartUploadResult struct {
ETag string ETag string
} }
// completePart sub container lists individual part numbers and their md5sum, part of completeMultipartUpload. // completePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
type completePart struct { type completePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
@ -150,13 +160,13 @@ type completePart struct {
ETag string ETag string
} }
// completeMultipartUpload container for completing multipart upload // completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct { type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
Parts []completePart `xml:"Part"` Parts []completePart `xml:"Part"`
} }
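For reference, a sketch of the XML this type marshals to; the ETag value is fabricated and the indentation in the comment is added for readability:

// xml.Marshal on a one-part completeMultipartUpload yields roughly:
//
//   <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//     <Part><PartNumber>1</PartNumber><ETag>9b2cf535f27731c974343645a3985328</ETag></Part>
//   </CompleteMultipartUpload>
body, err := xml.Marshal(completeMultipartUpload{
    Parts: []completePart{{PartNumber: 1, ETag: "9b2cf535f27731c974343645a3985328"}},
})
if err != nil {
    panic(err) // sketch only
}
_ = body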
// createBucketConfiguration container for bucket configuration // createBucketConfiguration container for bucket configuration.
type createBucketConfiguration struct { type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"` Location string `xml:"LocationConstraint"`
@ -164,7 +174,8 @@ type createBucketConfiguration struct {
// grant container for the grantee and his or her permissions. // grant container for the grantee and his or her permissions.
type grant struct { type grant struct {
// grantee container for DisplayName and ID of the person being granted permissions. // grantee container for DisplayName and ID of the person being
// granted permissions.
Grantee struct { Grantee struct {
ID string ID string
DisplayName string DisplayName string
@ -175,7 +186,8 @@ type grant struct {
Permission string Permission string
} }
// accessControlPolicy contains the elements providing ACL permissions for a bucket. // accessControlPolicy contains the elements providing ACL permissions
// for a bucket.
type accessControlPolicy struct { type accessControlPolicy struct {
// accessControlList container for ACL information. // accessControlList container for ACL information.
AccessControlList struct { AccessControlList struct {

View file

@ -25,15 +25,18 @@ import (
// BucketExists verifies if a bucket exists and you have permission to access it. // BucketExists verifies if a bucket exists and you have permission to access it.
func (c Client) BucketExists(bucketName string) error { func (c Client) BucketExists(bucketName string) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return err return err
} }
// Instantiate a new request.
req, err := c.newRequest("HEAD", requestMetadata{ req, err := c.newRequest("HEAD", requestMetadata{
bucketName: bucketName, bucketName: bucketName,
}) })
if err != nil { if err != nil {
return err return err
} }
// Initiate the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
@ -48,12 +51,13 @@ func (c Client) BucketExists(bucketName string) error {
} }
// StatObject verifies if object exists and you have permission to access. // StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) { func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil { if err := isValidBucketName(bucketName); err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
if err := isValidObjectName(objectName); err != nil { if err := isValidObjectName(objectName); err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
// Instantiate a new request. // Instantiate a new request.
req, err := c.newRequest("HEAD", requestMetadata{ req, err := c.newRequest("HEAD", requestMetadata{
@ -61,16 +65,17 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
objectName: objectName, objectName: objectName,
}) })
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
// Initiate the request.
resp, err := c.do(req) resp, err := c.do(req)
defer closeResponse(resp) defer closeResponse(resp)
if err != nil { if err != nil {
return ObjectStat{}, err return ObjectInfo{}, err
} }
if resp != nil { if resp != nil {
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return ObjectStat{}, HTTPRespToErrorResponse(resp, bucketName, objectName) return ObjectInfo{}, HTTPRespToErrorResponse(resp, bucketName, objectName)
} }
} }
@ -81,7 +86,7 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
// Parse content length. // Parse content length.
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil { if err != nil {
return ObjectStat{}, ErrorResponse{ return ObjectInfo{}, ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue, Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName, BucketName: bucketName,
@ -91,9 +96,10 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
} }
} }
// Parse Last-Modified as http time format.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil { if err != nil {
return ObjectStat{}, ErrorResponse{ return ObjectInfo{}, ErrorResponse{
Code: "InternalError", Code: "InternalError",
Message: "Last-Modified time format is invalid. " + reportIssue, Message: "Last-Modified time format is invalid. " + reportIssue,
BucketName: bucketName, BucketName: bucketName,
@ -103,12 +109,13 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectStat, error) {
AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"),
} }
} }
// Fetch content type if any is present.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" { if contentType == "" {
contentType = "application/octet-stream" contentType = "application/octet-stream"
} }
// Save object metadata info. // Save object metadata info.
var objectStat ObjectStat var objectStat ObjectInfo
objectStat.ETag = md5sum objectStat.ETag = md5sum
objectStat.Key = objectName objectStat.Key = objectName
objectStat.Size = size objectStat.Size = size
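Usage of the updated signature, sketched with made-up names (only fields assigned in this function are read):

// StatObject now returns ObjectInfo instead of ObjectStat.
info, err := c.StatObject("mybucket", "myobject")
if err != nil {
    log.Fatalln(err)
}
fmt.Println(info.Key, info.Size, info.ETag)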

View file

@ -25,6 +25,7 @@ import (
"net/http/httputil" "net/http/httputil"
"net/url" "net/url"
"os" "os"
"regexp"
"runtime" "runtime"
"strings" "strings"
"time" "time"
@ -33,10 +34,15 @@ import (
// Client implements Amazon S3 compatible methods. // Client implements Amazon S3 compatible methods.
type Client struct { type Client struct {
/// Standard options. /// Standard options.
accessKeyID string // AccessKeyID required for authorized requests.
secretAccessKey string // SecretAccessKey required for authorized requests. // AccessKeyID required for authorized requests.
signature SignatureType // Choose a signature type if necessary. accessKeyID string
anonymous bool // Set to 'true' if Client has no access and secret keys. // SecretAccessKey required for authorized requests.
secretAccessKey string
// Choose a signature type if necessary.
signature SignatureType
// Set to 'true' if Client has no access and secret keys.
anonymous bool
// User supplied. // User supplied.
appInfo struct { appInfo struct {
@ -69,7 +75,8 @@ const (
libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
) )
// NewV2 - instantiate minio client with Amazon S3 signature version '2' compatibility. // NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil { if err != nil {
@ -80,7 +87,8 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
return clnt, nil return clnt, nil
} }
// NewV4 - instantiate minio client with Amazon S3 signature version '4' compatibility. // NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil { if err != nil {
@ -91,13 +99,15 @@ func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool)
return clnt, nil return clnt, nil
} }
// New - instantiate minio Client, adds automatic verification // New - instantiate minio Client, adds automatic verification
// of signature.
func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) { func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (CloudStorageClient, error) {
clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Google cloud storage should be set to signature V2, force it if not. // Google cloud storage should be set to signature V2, force it if
// not.
if isGoogleEndpoint(clnt.endpointURL) { if isGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2 clnt.signature = SignatureV2
} }
@ -136,7 +146,8 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
// SetAppInfo - add application details to user agent. // SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) { func (c *Client) SetAppInfo(appName string, appVersion string) {
// if app name and version is not set, we do not set a new user agent. // if app name and version is not set, we do not set a new user
// agent.
if appName != "" && appVersion != "" { if appName != "" && appVersion != "" {
c.appInfo = struct { c.appInfo = struct {
appName string appName string
@ -149,12 +160,13 @@ func (c *Client) SetAppInfo(appName string, appVersion string) {
// SetCustomTransport - set new custom transport. // SetCustomTransport - set new custom transport.
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
// Set this to override default transport ``http.DefaultTransport``. // Set this to override default transport
// ``http.DefaultTransport``.
// //
// This transport is usually needed for debugging OR to add your own // This transport is usually needed for debugging OR to add your
// custom TLS certificates on the client transport, for custom CA's and // own custom TLS certificates on the client transport, for custom
// certs which are not part of standard certificate authority follow this // CA's and certs which are not part of standard certificate
// example :- // authority follow this example :-
// //
// tr := &http.Transport{ // tr := &http.Transport{
// TLSClientConfig: &tls.Config{RootCAs: pool}, // TLSClientConfig: &tls.Config{RootCAs: pool},
@ -187,7 +199,8 @@ func (c *Client) TraceOff() {
c.isTraceEnabled = false c.isTraceEnabled = false
} }
// requestMetadata - is a container for all the values to make a request. // requestMetadata - is a container for all the values to make a
// request.
type requestMetadata struct { type requestMetadata struct {
// If set newRequest presigns the URL. // If set newRequest presigns the URL.
presignURL bool presignURL bool
@ -202,10 +215,41 @@ type requestMetadata struct {
// Generated by our internal code. // Generated by our internal code.
contentBody io.ReadCloser contentBody io.ReadCloser
contentLength int64 contentLength int64
contentSha256Bytes []byte contentSHA256Bytes []byte
contentMD5Bytes []byte contentMD5Bytes []byte
} }
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
// For anonymous requests return here.
if c.anonymous {
return
}
// Handle if Signature V2.
if c.signature.isV2() {
// Set a temporary redacted auth
req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
return
}
/// Signature V4 authorization header.
// Save the original auth.
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
regSign := regexp.MustCompile("Signature=([0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
req.Header.Set("Authorization", newAuth)
return
}
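A quick sketch of the redaction filterSignature performs on a V4 Authorization header; the credential and signature values below are fabricated:

auth := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20160107/us-east-1/s3/aws4_request, " +
    "SignedHeaders=host;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"
auth = regexp.MustCompile("Credential=([A-Z0-9]+)/").ReplaceAllString(auth, "Credential=**REDACTED**/")
auth = regexp.MustCompile("Signature=([0-9a-f]+)").ReplaceAllString(auth, "Signature=**REDACTED**")
fmt.Println(auth)
// AWS4-HMAC-SHA256 Credential=**REDACTED**/20160107/us-east-1/s3/aws4_request,
// SignedHeaders=host;x-amz-date, Signature=**REDACTED**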
// dumpHTTP - dump HTTP request and response. // dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump. // Starts http dump.
@ -214,6 +258,9 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
return err return err
} }
// Filter out Signature field from Authorization header.
c.filterSignature(req)
// Only display request header. // Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false) reqTrace, err := httputil.DumpRequestOut(req, false)
if err != nil { if err != nil {
@ -227,11 +274,22 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
} }
// Only display response header. // Only display response header.
respTrace, err := httputil.DumpResponse(resp, false) var respTrace []byte
// For errors we make sure to dump response body as well.
if resp.StatusCode != http.StatusOK &&
resp.StatusCode != http.StatusPartialContent &&
resp.StatusCode != http.StatusNoContent {
respTrace, err = httputil.DumpResponse(resp, true)
if err != nil { if err != nil {
return err return err
} }
} else {
respTrace, err = httputil.DumpResponse(resp, false)
if err != nil {
return err
}
}
// Write response to trace output. // Write response to trace output.
_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
if err != nil { if err != nil {
@ -328,11 +386,12 @@ func (c Client) newRequest(method string, metadata requestMetadata) (*http.Reque
// Set sha256 sum only for non anonymous credentials. // Set sha256 sum only for non anonymous credentials.
if !c.anonymous { if !c.anonymous {
// set sha256 sum for signature calculation only with signature version '4'. // set sha256 sum for signature calculation only with
// signature version '4'.
if c.signature.isV4() { if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
if metadata.contentSha256Bytes != nil { if metadata.contentSHA256Bytes != nil {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSha256Bytes)) req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes))
} }
} }
} }
@ -356,6 +415,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (*http.Reque
return req, nil return req, nil
} }
// set User agent.
func (c Client) setUserAgent(req *http.Request) { func (c Client) setUserAgent(req *http.Request) {
req.Header.Set("User-Agent", libraryUserAgent) req.Header.Set("User-Agent", libraryUserAgent)
if c.appInfo.appName != "" && c.appInfo.appVersion != "" { if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
@ -363,12 +423,15 @@ func (c Client) setUserAgent(req *http.Request) {
} }
} }
// makeTargetURL makes a new target URL.
func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) { func (c Client) makeTargetURL(bucketName, objectName string, queryValues url.Values) (*url.URL, error) {
urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/" urlStr := c.endpointURL.Scheme + "://" + c.endpointURL.Host + "/"
// Make URL only if bucketName is available, otherwise use the endpoint URL. // Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" { if bucketName != "" {
// If endpoint supports virtual host style use that always. // If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support this. // Currently only S3 and Google Cloud Storage would support
// this.
if isVirtualHostSupported(c.endpointURL) { if isVirtualHostSupported(c.endpointURL) {
urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/" urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + c.endpointURL.Host + "/"
if objectName != "" { if objectName != "" {
@ -403,21 +466,17 @@ type CloudStorageClient interface {
SetBucketACL(bucketName string, cannedACL BucketACL) error SetBucketACL(bucketName string, cannedACL BucketACL) error
GetBucketACL(bucketName string) (BucketACL, error) GetBucketACL(bucketName string) (BucketACL, error)
ListBuckets() ([]BucketStat, error) ListBuckets() ([]BucketInfo, error)
ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectStat ListObjects(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo
ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartStat ListIncompleteUploads(bucket, prefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo
// Object Read/Write/Stat operations. // Object Read/Write/Stat operations.
GetObject(bucketName, objectName string) (reader io.ReadCloser, stat ObjectStat, err error) GetObject(bucketName, objectName string) (reader *Object, err error)
PutObject(bucketName, objectName string, data io.Reader, size int64, contentType string) (n int64, err error) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error)
StatObject(bucketName, objectName string) (ObjectStat, error) StatObject(bucketName, objectName string) (ObjectInfo, error)
RemoveObject(bucketName, objectName string) error RemoveObject(bucketName, objectName string) error
RemoveIncompleteUpload(bucketName, objectName string) error RemoveIncompleteUpload(bucketName, objectName string) error
// Object Read/Write for sparse upload.
GetObjectPartial(bucketName, objectName string) (reader ReadAtCloser, stat ObjectStat, err error)
PutObjectPartial(bucketName, objectName string, data ReadAtCloser, size int64, contentType string) (n int64, err error)
// File to Object API. // File to Object API.
FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error)
FGetObject(bucketName, objectName, filePath string) error FGetObject(bucketName, objectName, filePath string) error

View file

@ -0,0 +1,751 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio_test
import (
"bytes"
crand "crypto/rand"
"errors"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"testing"
"time"
"github.com/minio/minio-go"
)
// Tests removing partially uploaded objects.
func TestRemovePartiallyUploadedV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping function tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.NewV2(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, 128*1024)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
}
writer.CloseWithError(errors.New("Proactively closed to be verified later."))
}()
objectName := bucketName + "-resumable"
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
if err.Error() != "Proactively closed to be verified later." {
t.Fatal("Error:", err)
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable file based put object multipart upload.
func TestResumableFPutObjectV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
file, err := ioutil.TempFile(os.TempDir(), "resumable")
if err != nil {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
objectName := bucketName + "-resumable"
n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
}
// Close the file pro-actively for windows.
file.Close()
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = os.Remove(file.Name())
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable put object multipart upload.
func TestResumablePutObjectV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// generate 11MB
buf := make([]byte, 11*1024*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
objectName := bucketName + "-resumable"
reader := bytes.NewReader(buf)
n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReadSeeker interface methods.
func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
n, err = r.Seek(offset, 0)
if err != nil {
t.Fatal("Error:", err, offset)
}
if n != offset {
t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
offset, n)
}
n, err = r.Seek(0, 1)
if err != nil {
t.Fatal("Error:", err)
}
if n != offset {
t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
offset, n)
}
_, err = r.Seek(offset, 2)
if err == nil {
t.Fatal("Error: seek on positive offset for whence '2' should error out")
}
n, err = r.Seek(-offset, 2)
if err != nil {
t.Fatal("Error:", err)
}
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
var buffer bytes.Buffer
if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
t.Fatal("Error:", err)
}
if !bytes.Equal(buf, buffer.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests get object ReaderAt interface methods.
func TestGetObjectReadAtFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
// Read directly
buf2 := make([]byte, 512)
buf3 := make([]byte, 512)
buf4 := make([]byte, 512)
m, err := r.ReadAt(buf2, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf2), offset)
}
if m != len(buf2) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
}
if !bytes.Equal(buf2, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf3, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf3), offset)
}
if m != len(buf3) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
}
if !bytes.Equal(buf3, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
offset += 512
m, err = r.ReadAt(buf4, offset)
if err != nil {
t.Fatal("Error:", err, st.Size, len(buf4), offset)
}
if m != len(buf4) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
}
if !bytes.Equal(buf4, buf[offset:offset+512]) {
t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
}
buf5 := make([]byte, n)
// Read the whole object.
m, err = r.ReadAt(buf5, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf5))
}
}
if m != len(buf5) {
t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
}
if !bytes.Equal(buf, buf5) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
buf6 := make([]byte, n+1)
// Read the whole object and beyond.
_, err = r.ReadAt(buf6, 0)
if err != nil {
if err != io.EOF {
t.Fatal("Error:", err, len(buf6))
}
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests comprehensive list of all methods.
func TestFunctionalV2(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable to debug
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
file, err := os.Create(fileName)
if err != nil {
t.Fatal("Error:", err)
}
var totalSize int64
for i := 0; i < 3; i++ {
buf := make([]byte, rand.Intn(1<<19))
n, err := file.Write(buf)
if err != nil {
t.Fatal("Error:", err)
}
totalSize += int64(n)
}
file.Close()
// Verify if bucket exists and you have access.
err = c.BucketExists(bucketName)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Make the bucket 'public read/write'.
err = c.SetBucketACL(bucketName, "public-read-write")
if err != nil {
t.Fatal("Error:", err)
}
// Get the previously set acl.
acl, err := c.GetBucketACL(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
// ACL must be 'public read/write'.
if acl != minio.BucketACL("public-read-write") {
t.Fatal("Error:", acl)
}
// List all buckets.
buckets, err := c.ListBuckets()
if err != nil {
t.Fatal("Error:", err)
}
if len(buckets) == 0 {
t.Fatal("Error: list buckets cannot be empty", buckets)
}
// Verify if previously created bucket is listed in list buckets.
bucketFound := false
for _, bucket := range buckets {
if bucket.Name == bucketName {
bucketFound = true
}
}
// If bucket not found error out.
if !bucketFound {
t.Fatal("Error: bucket ", bucketName, "not found")
}
objectName := bucketName + "unique"
// Generate data
buf := make([]byte, rand.Intn(1<<19))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil {
t.Fatal("Error: ", err)
}
if n != int64(len(buf)) {
t.Fatal("Error: bad length ", n, len(buf))
}
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName+"-nolength")
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Instantiate a done channel to close all listing.
doneCh := make(chan struct{})
defer close(doneCh)
objFound := false
isRecursive := true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
break
}
}
if !incompObjNotFound {
t.Fatal("Error: unexpected dangling incomplete upload found.")
}
newReader, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err := ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.FGetObject(bucketName, objectName, fileName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
resp, err := http.Get(presignedGetURL)
if err != nil {
t.Fatal("Error: ", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatal("Error: ", resp.Status)
}
newPresignedBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newPresignedBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
if err != nil {
t.Fatal("Error: ", err)
}
buf = make([]byte, rand.Intn(1<<20))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
req, err := http.NewRequest("PUT", presignedPutURL, bytes.NewReader(buf))
if err != nil {
t.Fatal("Error: ", err)
}
httpClient := &http.Client{}
resp, err = httpClient.Do(req)
if err != nil {
t.Fatal("Error: ", err)
}
newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
newReadBytes, err = ioutil.ReadAll(newReader)
if err != nil {
t.Fatal("Error: ", err)
}
if !bytes.Equal(newReadBytes, buf) {
t.Fatal("Error: bytes mismatch.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-nolength")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveObject(bucketName, objectName+"-presigned")
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err == nil {
t.Fatal("Error:")
}
if err.Error() != "The specified bucket does not exist" {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName); err != nil {
t.Fatal("Error: ", err)
}
if err = os.Remove(fileName + "-f"); err != nil {
t.Fatal("Error: ", err)
}
}

View file

@ -19,6 +19,7 @@ package minio_test
import ( import (
"bytes" "bytes"
crand "crypto/rand" crand "crypto/rand"
"errors"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
@ -54,9 +55,10 @@ func randString(n int, src rand.Source) string {
return string(b[0:30]) return string(b[0:30])
} }
func TestResumableFPutObject(t *testing.T) { // Tests removing partially uploaded objects.
func TestRemovePartiallyUploaded(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping resumable tests with short runs") t.Skip("skipping function tests for short runs")
} }
// Seed random based on current time. // Seed random based on current time.
@ -64,9 +66,9 @@ func TestResumableFPutObject(t *testing.T) {
// Connect and make sure bucket exists. // Connect and make sure bucket exists.
c, err := minio.New( c, err := minio.New(
"play.minio.io:9002", "s3.amazonaws.com",
"Q3AM3UQ867SPQQA43P2F", os.Getenv("ACCESS_KEY"),
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", os.Getenv("SECRET_KEY"),
false, false,
) )
if err != nil { if err != nil {
@ -77,12 +79,78 @@ func TestResumableFPutObject(t *testing.T) {
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout. // Enable tracing, write to stdout.
// c.TraceOn(nil) // c.TraceOn(os.Stderr)
// Generate a new random bucket name. // Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket. // Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, 128*1024)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
}
writer.CloseWithError(errors.New("Proactively closed to be verified later."))
}()
objectName := bucketName + "-resumable"
_, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err == nil {
t.Fatal("Error: PutObject should fail.")
}
if err.Error() != "Proactively closed to be verified later." {
t.Fatal("Error:", err)
}
err = c.RemoveIncompleteUpload(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
// Tests resumable file based put object multipart upload.
func TestResumableFPutObject(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1") err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
@@ -93,7 +161,10 @@ func TestResumableFPutObject(t *testing.T) {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
n, _ := io.CopyN(file, crand.Reader, 11*1024*1024) n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
if n != int64(11*1024*1024) { if n != int64(11*1024*1024) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
} }
@@ -127,9 +198,10 @@ func TestResumableFPutObject(t *testing.T) {
} }
} }
// Tests resumable put object multipart upload.
func TestResumablePutObject(t *testing.T) { func TestResumablePutObject(t *testing.T) {
if testing.Short() { if testing.Short() {
t.Skip("skipping resumable tests with short runs") t.Skip("skipping functional tests for the short runs")
} }
// Seed random based on current time. // Seed random based on current time.
@@ -137,31 +209,31 @@ func TestResumablePutObject(t *testing.T) {
// Connect and make sure bucket exists. // Connect and make sure bucket exists.
c, err := minio.New( c, err := minio.New(
"play.minio.io:9002", "s3.amazonaws.com",
"Q3AM3UQ867SPQQA43P2F", os.Getenv("ACCESS_KEY"),
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", os.Getenv("SECRET_KEY"),
false, false,
) )
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent. // Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(nil)
// Generate a new random bucket name. // Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket. // Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1") err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
// generate 11MB // Generate 11MB
buf := make([]byte, 11*1024*1024) buf := make([]byte, 11*1024*1024)
_, err = io.ReadFull(crand.Reader, buf) _, err = io.ReadFull(crand.Reader, buf)
@@ -171,7 +243,7 @@ func TestResumablePutObject(t *testing.T) {
objectName := bucketName + "-resumable" objectName := bucketName + "-resumable"
reader := bytes.NewReader(buf) reader := bytes.NewReader(buf)
n, err := c.PutObject(bucketName, objectName, reader, int64(reader.Len()), "application/octet-stream") n, err := c.PutObject(bucketName, objectName, reader, "application/octet-stream")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@@ -190,37 +262,42 @@ func TestResumablePutObject(t *testing.T) {
} }
} }
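
Note the changed call shape: the explicit size argument to PutObject is gone in this revision, and the tests now pass only a reader and a content type, leaving the library to work out the length (or to stream in parts when it cannot). A minimal sketch of the new signature, with placeholder endpoint, credentials and names:

package main

import (
	"bytes"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9002", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// 11MiB exceeds the 5MiB minimum part size, so this exercises the
	// multipart (and hence resumable) upload path, as in the test above.
	payload := bytes.NewReader(make([]byte, 11*1024*1024))
	n, err := c.PutObject("my-bucketname", "my-objectname", payload, "application/octet-stream")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
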
func TestGetObjectPartialFunctional(t *testing.T) { // Tests get object ReaderSeeker interface methods.
func TestGetObjectReadSeekFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for short runs")
}
// Seed random based on current time. // Seed random based on current time.
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists. // Connect and make sure bucket exists.
c, err := minio.New( c, err := minio.New(
"play.minio.io:9002", "s3.amazonaws.com",
"Q3AM3UQ867SPQQA43P2F", os.Getenv("ACCESS_KEY"),
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", os.Getenv("SECRET_KEY"),
false, false,
) )
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent. // Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(nil)
// Generate a new random bucket name. // Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket. // Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1") err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
// generate data more than 32K // Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024) buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf) _, err = io.ReadFull(crand.Reader, buf)
@@ -228,9 +305,123 @@ func TestGetObjectPartialFunctional(t *testing.T) {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// save the data // Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano())) objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "binary/octet-stream") n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if n != int64(len(buf)) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
// Read the data back
r, err := c.GetObject(bucketName, objectName)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size)
}
offset := int64(2048)
n, err = r.Seek(offset, 0)
if err != nil {
t.Fatal("Error:", err, offset)
}
if n != offset {
t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
offset, n)
}
n, err = r.Seek(0, 1)
if err != nil {
t.Fatal("Error:", err)
}
if n != offset {
t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
offset, n)
}
_, err = r.Seek(offset, 2)
if err == nil {
t.Fatal("Error: seek on positive offset for whence '2' should error out")
}
n, err = r.Seek(-offset, 2)
if err != nil {
t.Fatal("Error:", err)
}
if n != 0 {
t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
}
var buffer bytes.Buffer
if _, err = io.CopyN(&buffer, r, st.Size); err != nil {
t.Fatal("Error:", err)
}
if !bytes.Equal(buf, buffer.Bytes()) {
t.Fatal("Error: Incorrect read bytes v/s original buffer.")
}
err = c.RemoveObject(bucketName, objectName)
if err != nil {
t.Fatal("Error: ", err)
}
err = c.RemoveBucket(bucketName)
if err != nil {
t.Fatal("Error:", err)
}
}
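
The bare whence values 0, 1 and 2 used by the test follow the usual Seek convention: relative to the start, the current offset and the end, respectively. A self-contained sketch against a plain io.ReadSeeker; the object handle returned by GetObject satisfies the same interface:

package main

import (
	"io"
	"log"
	"strings"
)

// seekDemo walks the three whence modes the test above exercises.
func seekDemo(rs io.ReadSeeker) error {
	if _, err := rs.Seek(2048, 0); err != nil { // 0: absolute offset from the start
		return err
	}
	pos, err := rs.Seek(0, 1) // 1: relative to the current offset
	if err != nil {
		return err
	}
	log.Println("current offset:", pos)
	_, err = rs.Seek(-2048, 2) // 2: relative to the end
	return err
}

func main() {
	if err := seekDemo(strings.NewReader(strings.Repeat("x", 4096))); err != nil {
		log.Fatalln(err)
	}
}
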
// Tests get object ReaderAt interface methods.
func TestGetObjectReadAtFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time.
rand.Seed(time.Now().Unix())
// Connect and make sure bucket exists.
c, err := minio.New(
"s3.amazonaws.com",
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
false,
)
if err != nil {
t.Fatal("Error:", err)
}
// Enable tracing, write to stderr.
// c.TraceOn(os.Stderr)
// Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
@@ -240,11 +431,15 @@ func TestGetObjectPartialFunctional(t *testing.T) {
} }
// read the data back // read the data back
r, st, err := c.GetObjectPartial(bucketName, objectName) r, err := c.GetObject(bucketName, objectName)
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName) t.Fatal("Error:", err, bucketName, objectName)
} }
st, err := r.Stat()
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
if st.Size != int64(len(buf)) { if st.Size != int64(len(buf)) {
t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n", t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
len(buf), st.Size) len(buf), st.Size)
@@ -323,36 +518,41 @@ func TestGetObjectPartialFunctional(t *testing.T) {
} }
} }
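
ReadAt, unlike Read, takes an absolute offset and never moves a cursor; at the tail of the object it hands back a short count together with io.EOF. A hedged sketch of the interface the test name refers to, assuming the handle from GetObject implements io.ReaderAt at this revision; names are placeholders:

package main

import (
	"io"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9002", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	r, err := c.GetObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	defer r.Close()
	// Fetch 512 bytes starting at absolute offset 1024; a read running past
	// the end of the object returns a partial count plus io.EOF.
	buf := make([]byte, 512)
	n, err := r.ReadAt(buf, 1024)
	if err != nil && err != io.EOF {
		log.Fatalln(err)
	}
	log.Println("read", n, "bytes at offset 1024")
}
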
// Tests comprehensive list of all methods.
func TestFunctional(t *testing.T) { func TestFunctional(t *testing.T) {
if testing.Short() {
t.Skip("skipping functional tests for the short runs")
}
// Seed random based on current time. // Seed random based on current time.
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
c, err := minio.New( c, err := minio.New(
"play.minio.io:9002", "s3.amazonaws.com",
"Q3AM3UQ867SPQQA43P2F", os.Getenv("ACCESS_KEY"),
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", os.Getenv("SECRET_KEY"),
false, false,
) )
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// Enable to debug
// c.TraceOn(os.Stderr)
// Set user agent. // Set user agent.
c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
// Enable tracing, write to stdout.
// c.TraceOn(nil)
// Generate a new random bucket name. // Generate a new random bucket name.
bucketName := randString(60, rand.NewSource(time.Now().UnixNano())) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
// make a new bucket. // Make a new bucket.
err = c.MakeBucket(bucketName, "private", "us-east-1") err = c.MakeBucket(bucketName, "private", "us-east-1")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
// generate a random file name. // Generate a random file name.
fileName := randString(60, rand.NewSource(time.Now().UnixNano())) fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
file, err := os.Create(fileName) file, err := os.Create(fileName)
if err != nil { if err != nil {
@@ -369,31 +569,34 @@ func TestFunctional(t *testing.T) {
} }
file.Close() file.Close()
// verify if bucket exits and you have access. // Verify if the bucket exists and you have access.
err = c.BucketExists(bucketName) err = c.BucketExists(bucketName)
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName) t.Fatal("Error:", err, bucketName)
} }
// make the bucket 'public read/write'. // Make the bucket 'public read/write'.
err = c.SetBucketACL(bucketName, "public-read-write") err = c.SetBucketACL(bucketName, "public-read-write")
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// get the previously set acl. // Get the previously set acl.
acl, err := c.GetBucketACL(bucketName) acl, err := c.GetBucketACL(bucketName)
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
// acl must be 'public read/write'. // ACL must be 'public read/write'.
if acl != minio.BucketACL("public-read-write") { if acl != minio.BucketACL("public-read-write") {
t.Fatal("Error:", acl) t.Fatal("Error:", acl)
} }
// list all buckets. // List all buckets.
buckets, err := c.ListBuckets() buckets, err := c.ListBuckets()
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
if len(buckets) == 0 {
t.Fatal("Error: list buckets cannot be empty", buckets)
}
@@ -413,14 +616,14 @@ func TestFunctional(t *testing.T) {
objectName := bucketName + "unique" objectName := bucketName + "unique"
// generate data // Generate data
buf := make([]byte, rand.Intn(1<<19)) buf := make([]byte, rand.Intn(1<<19))
_, err = io.ReadFull(crand.Reader, buf) _, err = io.ReadFull(crand.Reader, buf)
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "") n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
@@ -428,7 +631,7 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: bad length ", n, len(buf)) t.Fatal("Error: bad length ", n, len(buf))
} }
n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), -1, "binary/octet-stream") n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
if err != nil { if err != nil {
t.Fatal("Error:", err, bucketName, objectName+"-nolength") t.Fatal("Error:", err, bucketName, objectName+"-nolength")
} }
@@ -437,7 +640,34 @@ func TestFunctional(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n) t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
} }
newReader, _, err := c.GetObject(bucketName, objectName) // Instantiate a done channel; closing it terminates all pending listings.
doneCh := make(chan struct{})
defer close(doneCh)
objFound := false
isRecursive := true // Recursive is true.
for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
if obj.Key == objectName {
objFound = true
break
}
}
if !objFound {
t.Fatal("Error: object " + objectName + " not found.")
}
incompObjNotFound := true
for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
if objIncompl.Key != "" {
incompObjNotFound = false
break
}
}
if !incompObjNotFound {
t.Fatal("Error: unexpected dangling incomplete upload found.")
}
newReader, err := c.GetObject(bucketName, objectName)
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
@@ -451,15 +681,7 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: bytes mismatch.") t.Fatal("Error: bytes mismatch.")
} }
n, err = c.FPutObject(bucketName, objectName+"-f", fileName, "text/plain") err = c.FGetObject(bucketName, objectName, fileName+"-f")
if err != nil {
t.Fatal("Error: ", err)
}
if n != totalSize {
t.Fatal("Error: bad length ", n, totalSize)
}
err = c.FGetObject(bucketName, objectName+"-f", fileName+"-f")
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
@@ -503,7 +725,7 @@ func TestFunctional(t *testing.T) {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
newReader, _, err = c.GetObject(bucketName, objectName+"-presigned") newReader, err = c.GetObject(bucketName, objectName+"-presigned")
if err != nil { if err != nil {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
@@ -537,11 +759,11 @@ func TestFunctional(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Error:", err) t.Fatal("Error:", err)
} }
err = c.RemoveBucket("bucket1") err = c.RemoveBucket(bucketName)
if err == nil { if err == nil {
t.Fatal("Error:") t.Fatal("Error:")
} }
if err.Error() != "The specified bucket does not exist." { if err.Error() != "The specified bucket does not exist" {
t.Fatal("Error: ", err) t.Fatal("Error: ", err)
} }
if err = os.Remove(fileName); err != nil { if err = os.Remove(fileName); err != nil {
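
The done-channel listing added in this hunk is worth restating on its own: closing the channel tells the background pager to stop fetching further results. A sketch using the ListObjects signature from the README (bucket, prefix, recursive flag, done channel); endpoint and names are placeholders:

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9002", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	// Closing doneCh stops the goroutine that pages through listing results.
	doneCh := make(chan struct{})
	defer close(doneCh)
	isRecursive := true
	for obj := range c.ListObjects("my-bucketname", "my-prefix", isRecursive, doneCh) {
		log.Println(obj.Key, obj.Size)
	}
}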

View file

@@ -17,11 +17,126 @@
package minio package minio
import ( import (
"fmt"
"net/http"
"net/url" "net/url"
"strings"
"testing" "testing"
) )
func TestSignature(t *testing.T) { func TestEncodeURL2Path(t *testing.T) {
type urlStrings struct {
objName string
encodedObjName string
}
bucketName := "bucketName"
want := []urlStrings{
{
objName: "本語",
encodedObjName: "%E6%9C%AC%E8%AA%9E",
},
{
objName: "本語.1",
encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
},
{
objName: ">123>3123123",
encodedObjName: "%3E123%3E3123123",
},
{
objName: "test 1 2.txt",
encodedObjName: "test%201%202.txt",
},
{
objName: "test++ 1.txt",
encodedObjName: "test%2B%2B%201.txt",
},
}
for _, o := range want {
u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
if err != nil {
t.Fatal("Error:", err)
}
urlPath := "/" + bucketName + "/" + o.encodedObjName
if urlPath != encodeURL2Path(u) {
t.Fatal("Error")
}
}
}
func TestErrorResponse(t *testing.T) {
var err error
err = ErrorResponse{
Code: "Testing",
}
errResp := ToErrorResponse(err)
if errResp.Code != "Testing" {
t.Fatal("Type conversion failed, we have an empty struct.")
}
// Test http response decoding.
var httpResponse *http.Response
// Set empty variables
httpResponse = nil
var bucketName, objectName string
// Should fail with invalid argument.
err = HTTPRespToErrorResponse(httpResponse, bucketName, objectName)
errResp = ToErrorResponse(err)
if errResp.Code != "InvalidArgument" {
t.Fatal("Empty response input should return invalid argument.")
}
}
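
ToErrorResponse, as tested above, recovers the typed S3 error from a plain error value so callers can branch on its Code. A short sketch; the non-existent bucket name is a placeholder, and BucketExists returning a plain error matches the functional tests earlier in this diff:

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", false)
	if err != nil {
		log.Fatalln(err)
	}
	if err := c.BucketExists("bucket-that-does-not-exist"); err != nil {
		// Convert back to the typed response to inspect the S3 error code.
		errResp := minio.ToErrorResponse(err)
		log.Println("S3 error code:", errResp.Code)
	}
}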
func TestSignatureCalculation(t *testing.T) {
req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
if err != nil {
t.Fatal("Error:", err)
}
req = SignV4(*req, "", "", "us-east-1")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV4(*req, "", "", "us-east-1", 0)
if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV2(*req, "", "")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV2(*req, "", "", 0)
if strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: normal credentials should have Signature query resource.")
}
req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
if !strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: normal credentials should not have Signature query resource.")
}
}
func TestSignatureType(t *testing.T) {
clnt := Client{} clnt := Client{}
if !clnt.signature.isV4() { if !clnt.signature.isV4() {
t.Fatal("Error") t.Fatal("Error")

View file

@@ -26,8 +26,8 @@ build_script:
- gofmt -s -l . - gofmt -s -l .
- golint github.com/minio/minio-go... - golint github.com/minio/minio-go...
- deadcode - deadcode
- go test - go test -short -v
- go test -test.short -race - go test -short -race -v
# to disable automatic tests # to disable automatic tests
test: off test: off

View file

@@ -16,7 +16,7 @@
package minio package minio
// BucketACL - bucket level access control. // BucketACL - Bucket level access control.
type BucketACL string type BucketACL string
// Different types of ACL's currently supported for buckets. // Different types of ACL's currently supported for buckets.
@@ -35,7 +35,7 @@ func (b BucketACL) String() string {
return string(b) return string(b)
} }
// isValidBucketACL - is provided acl string supported. // isValidBucketACL - Is provided acl string supported.
func (b BucketACL) isValidBucketACL() bool { func (b BucketACL) isValidBucketACL() bool {
switch true { switch true {
case b.isPrivate(): case b.isPrivate():
@@ -47,29 +47,29 @@ func (b BucketACL) isValidBucketACL() bool {
case b.isAuthenticated(): case b.isAuthenticated():
return true return true
case b.String() == "private": case b.String() == "private":
// by default its "private" // By default it's "private".
return true return true
default: default:
return false return false
} }
} }
// isPrivate - is acl Private. // isPrivate - Is acl Private.
func (b BucketACL) isPrivate() bool { func (b BucketACL) isPrivate() bool {
return b == bucketPrivate return b == bucketPrivate
} }
// isPublicRead - is acl PublicRead. // isReadOnly - Is acl ReadOnly.
func (b BucketACL) isReadOnly() bool { func (b BucketACL) isReadOnly() bool {
return b == bucketReadOnly return b == bucketReadOnly
} }
// isPublicReadWrite - is acl PublicReadWrite. // isPublic - Is acl PublicReadWrite.
func (b BucketACL) isPublic() bool { func (b BucketACL) isPublic() bool {
return b == bucketPublic return b == bucketPublic
} }
// isAuthenticated - is acl AuthenticatedRead. // isAuthenticated - Is acl AuthenticatedRead.
func (b BucketACL) isAuthenticated() bool { func (b BucketACL) isAuthenticated() bool {
return b == bucketAuthenticated return b == bucketAuthenticated
} }

View file

@@ -24,25 +24,26 @@ import (
"sync" "sync"
) )
// bucketLocationCache provides simple mechansim to hold bucket locations in memory. // bucketLocationCache - Provides a simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct { type bucketLocationCache struct {
// Mutex is used for handling the concurrent // mutex is used for handling the concurrent
// read/write requests for cache // read/write requests for cache.
sync.RWMutex sync.RWMutex
// items holds the cached bucket locations. // items holds the cached bucket locations.
items map[string]string items map[string]string
} }
// newBucketLocationCache provides a new bucket location cache to be used // newBucketLocationCache - Provides a new bucket location cache to be
// internally with the client object. // used internally with the client object.
func newBucketLocationCache() *bucketLocationCache { func newBucketLocationCache() *bucketLocationCache {
return &bucketLocationCache{ return &bucketLocationCache{
items: make(map[string]string), items: make(map[string]string),
} }
} }
// Get returns a value of a given key if it exists // Get - Returns a value of a given key if it exists.
func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
r.RLock() r.RLock()
defer r.RUnlock() defer r.RUnlock()
@@ -50,21 +51,21 @@ func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool)
return return
} }
// Set will persist a value to the cache // Set - Will persist a value into cache.
func (r *bucketLocationCache) Set(bucketName string, location string) { func (r *bucketLocationCache) Set(bucketName string, location string) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
r.items[bucketName] = location r.items[bucketName] = location
} }
// Delete deletes a bucket name. // Delete - Deletes a bucket name from cache.
func (r *bucketLocationCache) Delete(bucketName string) { func (r *bucketLocationCache) Delete(bucketName string) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
delete(r.items, bucketName) delete(r.items, bucketName)
} }
// getBucketLocation - get location for the bucketName from location map cache. // getBucketLocation - Get location for the bucketName from location map cache.
func (c Client) getBucketLocation(bucketName string) (string, error) { func (c Client) getBucketLocation(bucketName string) (string, error) {
// For anonymous requests, default to "us-east-1" and let other calls // For anonymous requests, default to "us-east-1" and let other calls
// move forward. // move forward.
@@ -101,12 +102,12 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
} }
location := locationConstraint location := locationConstraint
// location is empty will be 'us-east-1'. // If location is empty, it defaults to 'us-east-1'.
if location == "" { if location == "" {
location = "us-east-1" location = "us-east-1"
} }
// location can be 'EU' convert it to meaningful 'eu-west-1'. // If location is 'EU', convert it to the meaningful 'eu-west-1'.
if location == "EU" { if location == "EU" {
location = "eu-west-1" location = "eu-west-1"
} }
@@ -118,7 +119,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return location, nil return location, nil
} }
// getBucketLocationRequest wrapper creates a new getBucketLocation request. // getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
// Set location query. // Set location query.
urlValues := make(url.Values) urlValues := make(url.Values)
@@ -129,16 +130,16 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
targetURL.Path = filepath.Join(bucketName, "") targetURL.Path = filepath.Join(bucketName, "")
targetURL.RawQuery = urlValues.Encode() targetURL.RawQuery = urlValues.Encode()
// get a new HTTP request for the method. // Get a new HTTP request for the method.
req, err := http.NewRequest("GET", targetURL.String(), nil) req, err := http.NewRequest("GET", targetURL.String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// set UserAgent for the request. // Set UserAgent for the request.
c.setUserAgent(req) c.setUserAgent(req)
// set sha256 sum for signature calculation only with signature version '4'. // Set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() { if c.signature.isV4() {
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
} }

View file

@@ -1,52 +0,0 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/xml"
"io"
)
// xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
return d.Decode(v)
}
// sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumMD5 calculate md5 sum for an input byte array.
func sumMD5(data []byte) []byte {
hash := md5.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}

View file

@@ -25,14 +25,18 @@ const minimumPartSize = 1024 * 1024 * 5
// maxParts - maximum parts for a single multipart session. // maxParts - maximum parts for a single multipart session.
const maxParts = 10000 const maxParts = 10000
// maxPartSize - maximum part size 5GiB for a single multipart upload operation. // maxPartSize - maximum part size 5GiB for a single multipart upload
// operation.
const maxPartSize = 1024 * 1024 * 1024 * 5 const maxPartSize = 1024 * 1024 * 1024 * 5
// maxSinglePutObjectSize - maximum size 5GiB of object per PUT operation. // maxSinglePutObjectSize - maximum size 5GiB of object per PUT
// operation.
const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// maxMultipartPutObjectSize - maximum size 5TiB of object for Multipart operation. // maxMultipartPutObjectSize - maximum size 5TiB of object for
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadAtBufferSize - optimal buffer 5MiB used for reading through ReadAt operation. // optimalReadAtBufferSize - optimal buffer 5MiB used for reading
// through ReadAt operation.
const optimalReadAtBufferSize = 1024 * 1024 * 5 const optimalReadAtBufferSize = 1024 * 1024 * 5
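
These limits interact: with at most 10000 parts per upload, a 5TiB object forces parts of roughly 5TiB / 10000 ≈ 525MiB, far above the 5MiB minimum, so an uploader has to scale its part size with the object. An illustrative computation of that relationship; this is not the library's exact sizing algorithm:

package main

import "fmt"

const (
	minimumPartSize           = 1024 * 1024 * 5
	maxParts                  = 10000
	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
)

// partSizeFor doubles the part size until the object fits in maxParts parts.
func partSizeFor(objectSize int64) int64 {
	partSize := int64(minimumPartSize)
	for objectSize/partSize > maxParts {
		partSize *= 2
	}
	return partSize
}

func main() {
	fmt.Println(partSizeFor(1024 * 1024 * 1024))        // 1GiB still fits 5MiB parts
	fmt.Println(partSizeFor(maxMultipartPutObjectSize)) // 5TiB needs 640MiB parts
}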

View file

@@ -29,28 +29,40 @@ import (
func main() { func main() {
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values. // Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
// Requests are always secure by default. set inSecure=true to enable insecure access. // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// inSecure boolean is the last argument for New(). // This boolean value is the last argument for New().
// New provides a client object backend by automatically detected signature type based // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// on the provider. // determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false) s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname") reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
defer reader.Close()
localfile, err := os.Create("my-testfile") localFile, err := os.Create("my-testfile")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
defer localfile.Close() defer localFile.Close()
if _, err = io.Copy(localfile, reader); err != nil { stat, err := reader.Stat()
if err != nil {
log.Fatalln(err)
}
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
} }

View file

@@ -1,91 +0,0 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"errors"
"io"
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatalln(err)
}
defer localfile.Close()
st, err := localFile.Stat()
if err != nil {
log.Fatalln(err)
}
readAtOffset := st.Size()
readAtBuffer := make([]byte, 5*1024*1024)
// Loop and write.
for {
readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
log.Fatalln(rerr)
}
}
writeSize, werr := localFile.Write(readAtBuffer[:readAtSize])
if werr != nil {
log.Fatalln(werr)
}
if readAtSize != writeSize {
log.Fatalln(errors.New("Something really bad happened here."))
}
readAtOffset += int64(writeSize)
if rerr == io.EOF {
break
}
}
// totalWritten size.
totalWritten := readAtOffset
// If found mismatch error out.
if totalWritten != stat.Size {
log.Fatalln(errors.New("Something really bad happened here."))
}
}

View file

@@ -44,8 +44,7 @@ func main() {
} }
defer object.Close() defer object.Close()
st, _ := object.Stat() n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -1,56 +0,0 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: my-bucketname, my-objectname and my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("play.minio.io:9002", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", false)
if err != nil {
log.Fatalln(err)
}
localFile, err := os.Open("testfile")
if err != nil {
log.Fatalln(err)
}
st, err := localFile.Stat()
if err != nil {
log.Fatalln(err)
}
defer localFile.Close()
_, err = s3Client.PutObjectPartial("bucket-name", "objectName", localFile, st.Size(), "text/plain")
if err != nil {
log.Fatalln(err)
}
}

View file

@@ -35,23 +35,29 @@ func main() {
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value. // determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false) s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
reader, _, err := s3Client.GetObject("my-bucketname", "my-objectname") reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
defer reader.Close()
localfile, err := os.Create("my-testfile") localFile, err := os.Create("my-testfile")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
defer localfile.Close() defer localFile.Close()
if _, err = io.Copy(localfile, reader); err != nil { stat, err := reader.Stat()
if err != nil {
log.Fatalln(err)
}
if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
} }

View file

@@ -1,92 +0,0 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"errors"
"io"
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
// my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
if err != nil {
log.Fatalln(err)
}
reader, stat, err := s3Client.GetObjectPartial("my-bucketname", "my-objectname")
if err != nil {
log.Fatalln(err)
}
defer reader.Close()
localFile, err := os.OpenFile("my-testfile", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatalln(err)
}
defer localfile.Close()
st, err := localFile.Stat()
if err != nil {
log.Fatalln(err)
}
readAtOffset := st.Size()
readAtBuffer := make([]byte, 5*1024*1024)
// For loop.
for {
readAtSize, rerr := reader.ReadAt(readAtBuffer, readAtOffset)
if rerr != nil {
if rerr != io.EOF {
log.Fatalln(rerr)
}
}
writeSize, werr := localFile.Write(readAtBuffer[:readAtSize])
if werr != nil {
log.Fatalln(werr)
}
if readAtSize != writeSize {
log.Fatalln(errors.New("Something really bad happened here."))
}
readAtOffset += int64(writeSize)
if rerr == io.EOF {
break
}
}
// totalWritten size.
totalWritten := readAtOffset
// If found mismatch error out.
if totalWritten != stat.Size {
log.Fatalln(errors.New("Something really bad happened here."))
}
}

View file

@@ -45,8 +45,7 @@ func main() {
} }
defer object.Close() defer object.Close()
st, _ := object.Stat() n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, st.Size(), "application/octet-stream")
if err != nil { if err != nil {
log.Fatalln(err) log.Fatalln(err)
} }

View file

@@ -1,57 +0,0 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
// my-testfile are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
if err != nil {
log.Fatalln(err)
}
localFile, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
st, err := localFile.Stat()
if err != nil {
log.Fatalln(err)
}
defer localFile.Close()
_, err = s3Client.PutObjectPartial("my-bucketname", "my-objectname", localFile, st.Size(), "text/plain")
if err != nil {
log.Fatalln(err)
}
}

View file

@@ -2,7 +2,6 @@ package minio
import ( import (
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"strings" "strings"
"time" "time"
@@ -11,7 +10,8 @@ import (
// expirationDateFormat date format for expiration key in json policy. // expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z" const expirationDateFormat = "2006-01-02T15:04:05.999Z"
// policyCondition explanation: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html // policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// //
// Example: // Example:
// //
@@ -27,11 +27,15 @@ type policyCondition struct {
value string value string
} }
// PostPolicy provides strict static type conversion and validation for Amazon S3's POST policy JSON string. // PostPolicy - Provides strict static type conversion and validation
// for Amazon S3's POST policy JSON string.
type PostPolicy struct { type PostPolicy struct {
expiration time.Time // expiration date and time of the POST policy. // Expiration date and time of the POST policy.
conditions []policyCondition // collection of different policy conditions. expiration time.Time
// contentLengthRange minimum and maximum allowable size for the uploaded content. // Collection of different policy conditions.
conditions []policyCondition
// ContentLengthRange minimum and maximum allowable size for the
// uploaded content.
contentLengthRange struct { contentLengthRange struct {
min int64 min int64
max int64 max int64
@@ -41,7 +45,7 @@ type PostPolicy struct {
formData map[string]string formData map[string]string
} }
// NewPostPolicy instantiate new post policy. // NewPostPolicy - Instantiate new post policy.
func NewPostPolicy() *PostPolicy { func NewPostPolicy() *PostPolicy {
p := &PostPolicy{} p := &PostPolicy{}
p.conditions = make([]policyCondition, 0) p.conditions = make([]policyCondition, 0)
@@ -49,19 +53,19 @@ func NewPostPolicy() *PostPolicy {
return p return p
} }
// SetExpires expiration time. // SetExpires - Sets expiration time for the new policy.
func (p *PostPolicy) SetExpires(t time.Time) error { func (p *PostPolicy) SetExpires(t time.Time) error {
if t.IsZero() { if t.IsZero() {
return errors.New("No expiry time set.") return ErrInvalidArgument("No expiry time set.")
} }
p.expiration = t p.expiration = t
return nil return nil
} }
// SetKey Object name. // SetKey - Sets an object name for the policy based upload.
func (p *PostPolicy) SetKey(key string) error { func (p *PostPolicy) SetKey(key string) error {
if strings.TrimSpace(key) == "" || key == "" { if strings.TrimSpace(key) == "" || key == "" {
return errors.New("Object name is not specified.") return ErrInvalidArgument("Object name is empty.")
} }
policyCond := policyCondition{ policyCond := policyCondition{
matchType: "eq", matchType: "eq",
@@ -75,10 +79,11 @@ func (p *PostPolicy) SetKey(key string) error {
return nil return nil
} }
// SetKeyStartsWith Object name that can start with. // SetKeyStartsWith - Sets an object name that a policy based upload
// can start with.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
return errors.New("Object prefix is not specified.") return ErrInvalidArgument("Object prefix is empty.")
} }
policyCond := policyCondition{ policyCond := policyCondition{
matchType: "starts-with", matchType: "starts-with",
@@ -92,10 +97,10 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
return nil return nil
} }
// SetBucket bucket name. // SetBucket - Sets bucket at which objects will be uploaded to.
func (p *PostPolicy) SetBucket(bucketName string) error { func (p *PostPolicy) SetBucket(bucketName string) error {
if strings.TrimSpace(bucketName) == "" || bucketName == "" { if strings.TrimSpace(bucketName) == "" || bucketName == "" {
return errors.New("Bucket name is not specified.") return ErrInvalidArgument("Bucket name is empty.")
} }
policyCond := policyCondition{ policyCond := policyCondition{
matchType: "eq", matchType: "eq",
@@ -109,10 +114,11 @@ func (p *PostPolicy) SetBucket(bucketName string) error {
return nil return nil
} }
// SetContentType content-type. // SetContentType - Sets content-type of the object for this policy
// based upload.
func (p *PostPolicy) SetContentType(contentType string) error { func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" || contentType == "" { if strings.TrimSpace(contentType) == "" || contentType == "" {
return errors.New("No content type specified.") return ErrInvalidArgument("No content type specified.")
} }
policyCond := policyCondition{ policyCond := policyCondition{
matchType: "eq", matchType: "eq",
@@ -126,16 +132,17 @@ func (p *PostPolicy) SetContentType(contentType string) error {
return nil return nil
} }
// SetContentLengthRange - set new min and max content length condition. // SetContentLengthRange - Set new min and max content length
// condition for all incoming uploads.
func (p *PostPolicy) SetContentLengthRange(min, max int64) error { func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min > max { if min > max {
return errors.New("minimum limit is larger than maximum limit") return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
} }
if min < 0 { if min < 0 {
return errors.New("minimum limit cannot be negative") return ErrInvalidArgument("Minimum limit cannot be negative.")
} }
if max < 0 { if max < 0 {
return errors.New("maximum limit cannot be negative") return ErrInvalidArgument("Maximum limit cannot be negative.")
} }
p.contentLengthRange.min = min p.contentLengthRange.min = min
p.contentLengthRange.max = max p.contentLengthRange.max = max
@@ -145,18 +152,18 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
// addNewPolicy - internal helper to validate adding new policies. // addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
return errors.New("Policy fields empty.") return ErrInvalidArgument("Policy fields are empty.")
} }
p.conditions = append(p.conditions, policyCond) p.conditions = append(p.conditions, policyCond)
return nil return nil
} }
// Stringer interface for printing in pretty manner. // Stringer interface for printing the policy as a JSON formatted string.
func (p PostPolicy) String() string { func (p PostPolicy) String() string {
return string(p.marshalJSON()) return string(p.marshalJSON())
} }
// marshalJSON provides Marshalled JSON. // marshalJSON - Provides Marshalled JSON in bytes.
func (p PostPolicy) marshalJSON() []byte { func (p PostPolicy) marshalJSON() []byte {
expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
var conditionsStr string var conditionsStr string
@@ -178,7 +185,7 @@ func (p PostPolicy) marshalJSON() []byte {
return []byte(retStr) return []byte(retStr)
} }
// base64 produces base64 of PostPolicy's Marshalled json. // base64 - Produces base64 of PostPolicy's Marshalled json.
func (p PostPolicy) base64() string { func (p PostPolicy) base64() string {
return base64.StdEncoding.EncodeToString(p.marshalJSON()) return base64.StdEncoding.EncodeToString(p.marshalJSON())
} }
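
Put together, the setters above build a browser-upload policy. A hedged sketch of constructing one; the resulting policy is then handed to the client's presigned-POST machinery, which this hunk does not show:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	policy := minio.NewPostPolicy()
	// Every setter validates its input and returns ErrInvalidArgument
	// on empty or inconsistent values.
	if err := policy.SetBucket("my-bucketname"); err != nil {
		log.Fatalln(err)
	}
	if err := policy.SetKey("my-objectname"); err != nil {
		log.Fatalln(err)
	}
	if err := policy.SetExpires(time.Now().UTC().Add(24 * time.Hour)); err != nil {
		log.Fatalln(err)
	}
	// Constrain uploads to at most 10MiB.
	if err := policy.SetContentLengthRange(0, 10*1024*1024); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(policy) // the Stringer prints the JSON policy document
}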

View file

@@ -30,7 +30,7 @@ import (
"time" "time"
) )
// signature and API related constants. // Signature and API related constants.
const ( const (
signV2Algorithm = "AWS" signV2Algorithm = "AWS"
) )
@@ -55,14 +55,14 @@ func encodeURL2Path(u *url.URL) (path string) {
} }
// PreSignV2 - presign the request in following style. // PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE} // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// presign is a noop for anonymous credentials. // Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" { if accessKeyID == "" || secretAccessKey == "" {
return nil return &req
} }
d := time.Now().UTC() d := time.Now().UTC()
// Add date if not present // Add date if not present.
if date := req.Header.Get("Date"); date == "" { if date := req.Header.Get("Date"); date == "" {
req.Header.Set("Date", d.Format(http.TimeFormat)) req.Header.Set("Date", d.Format(http.TimeFormat))
} }
@@ -73,12 +73,12 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
// Find epoch expires when the request will expire. // Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires epochExpires := d.Unix() + expires
// get string to sign. // Get string to sign.
stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign)) hm.Write([]byte(stringToSign))
// calculate signature. // Calculate signature.
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
query := req.URL.Query() query := req.URL.Query()
@@ -98,7 +98,8 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
return &req return &req
} }
// PostPresignSignatureV2 - presigned signature for PostPolicy request // PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64)) hm.Write([]byte(policyBase64))
@@ -124,6 +125,11 @@ func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
// SignV2 sign the request before Do() (AWS Signature Version 2). // SignV2 sign the request before Do() (AWS Signature Version 2).
func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time. // Initial time.
d := time.Now().UTC() d := time.Now().UTC()
@@ -160,11 +166,11 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// CanonicalizedResource; // CanonicalizedResource;
func getStringToSignV2(req http.Request) string { func getStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
// write standard headers. // Write standard headers.
writeDefaultHeaders(buf, req) writeDefaultHeaders(buf, req)
// write canonicalized protocol headers if any. // Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req) writeCanonicalizedHeaders(buf, req)
// write canonicalized Query resources if any. // Write canonicalized Query resources if any.
writeCanonicalizedResource(buf, req) writeCanonicalizedResource(buf, req)
return buf.String() return buf.String()
} }
@@ -186,7 +192,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
var protoHeaders []string var protoHeaders []string
vals := make(map[string][]string) vals := make(map[string][]string)
for k, vv := range req.Header { for k, vv := range req.Header {
// all the AMZ and GOOG headers should be lowercase // All the AMZ headers should be lowercase
lk := strings.ToLower(k) lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") { if strings.HasPrefix(lk, "x-amz") {
protoHeaders = append(protoHeaders, lk) protoHeaders = append(protoHeaders, lk)
@@ -246,6 +252,7 @@ var resourceList = []string{
// <HTTP-Request-URI, from the protocol name up to the query string> + // <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error { func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
// Save request URL.
requestURL := req.URL requestURL := req.URL
// Get encoded URL path. // Get encoded URL path.
@@ -256,20 +263,21 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) error {
if requestURL.RawQuery != "" { if requestURL.RawQuery != "" {
var n int var n int
vals, _ := url.ParseQuery(requestURL.RawQuery) vals, _ := url.ParseQuery(requestURL.RawQuery)
// loop through all the supported resourceList. // Verify if any sub resource queries are present, if yes
// canonicalize them.
for _, resource := range resourceList { for _, resource := range resourceList {
if vv, ok := vals[resource]; ok && len(vv) > 0 { if vv, ok := vals[resource]; ok && len(vv) > 0 {
n++ n++
// first element // First element
switch n { switch n {
case 1: case 1:
buf.WriteByte('?') buf.WriteByte('?')
// the rest // The rest
default: default:
buf.WriteByte('&') buf.WriteByte('&')
} }
buf.WriteString(resource) buf.WriteString(resource)
// request parameters // Request parameters
if len(vv[0]) > 0 { if len(vv[0]) > 0 {
buf.WriteByte('=') buf.WriteByte('=')
buf.WriteString(url.QueryEscape(vv[0])) buf.WriteString(url.QueryEscape(vv[0]))

View file

@@ -26,7 +26,7 @@ import (
"time" "time"
) )
// signature and API related constants. // Signature and API related constants.
const ( const (
signV4Algorithm = "AWS4-HMAC-SHA256" signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z" iso8601DateFormat = "20060102T150405Z"
@@ -34,28 +34,35 @@ const (
) )
/// ///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. /// Excerpts from @lsegal -
/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
/// ///
/// User-Agent: /// User-Agent:
/// ///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs /// This is ignored from signing because signing this causes
/// (that are executed by other agents) or when customers pass requests through proxies, which may /// problems with generating pre-signed URLs (that are executed
/// modify the user-agent. /// by other agents) or when customers pass requests through
/// proxies, which may modify the user-agent.
/// ///
/// Content-Length: /// Content-Length:
/// ///
/// This is ignored from signing because generating a pre-signed URL should not provide a content-length /// This is ignored from signing because generating a pre-signed
/// constraint, specifically when vending a S3 pre-signed PUT URL. The corollary to this is that when /// URL should not provide a content-length constraint,
/// sending regular requests (non-pre-signed), the signature contains a checksum of the body, which /// specifically when vending a S3 pre-signed PUT URL. The
/// implicitly validates the payload length (since changing the number of bytes would change the checksum) /// corollary to this is that when sending regular requests
/// (non-pre-signed), the signature contains a checksum of the
/// body, which implicitly validates the payload length (since
/// changing the number of bytes would change the checksum)
/// and therefore this header is not valuable in the signature. /// and therefore this header is not valuable in the signature.
/// ///
/// Content-Type: /// Content-Type:
/// ///
/// Signing this header causes quite a number of problems in browser environments, where browsers /// Signing this header causes quite a number of problems in
/// like to modify and normalize the content-type header in different ways. There is more information /// browser environments, where browsers like to modify and
/// on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding this field simplifies logic /// normalize the content-type header in different ways. There is
/// and reduces the possibility of future bugs /// more information on this in https://goo.gl/2E9gyy. Avoiding
/// this field simplifies logic and reduces the possibility of
/// future bugs.
/// ///
/// Authorization: /// Authorization:
/// ///
@ -68,7 +75,7 @@ var ignoredHeaders = map[string]bool{
"User-Agent": true, "User-Agent": true,
} }
// getSigningKey hmac seed to calculate final signature // getSigningKey hmac seed to calculate final signature.
func getSigningKey(secret, loc string, t time.Time) []byte { func getSigningKey(secret, loc string, t time.Time) []byte {
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
location := sumHMAC(date, []byte(loc)) location := sumHMAC(date, []byte(loc))
@ -77,12 +84,13 @@ func getSigningKey(secret, loc string, t time.Time) []byte {
return signingKey return signingKey
} }
// getSignature final signature in hexadecimal form // getSignature final signature in hexadecimal form.
func getSignature(signingKey []byte, stringToSign string) string { func getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
} }
// getScope generate a string of a specific date, an AWS region, and a service // getScope generate a string of a specific date, an AWS region, and a
// service.
func getScope(location string, t time.Time) string { func getScope(location string, t time.Time) string {
scope := strings.Join([]string{ scope := strings.Join([]string{
t.Format(yyyymmdd), t.Format(yyyymmdd),
@ -93,13 +101,14 @@ func getScope(location string, t time.Time) string {
return scope return scope
} }
// getCredential generate a credential string // getCredential generate a credential string.
func getCredential(accessKeyID, location string, t time.Time) string { func getCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t) scope := getScope(location, t)
return accessKeyID + "/" + scope return accessKeyID + "/" + scope
} }
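
The hunk elides the tail of the derivation; per the SigV4 spec the chain continues through the service name and the literal "aws4_request". A self-contained sketch with placeholder credentials:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "SECRETKEY" // placeholder credential
	// SigV4 derives the signing key through four chained HMACs:
	date := hmacSHA256([]byte("AWS4"+secret), []byte("20160107"))
	location := hmacSHA256(date, []byte("us-east-1"))
	service := hmacSHA256(location, []byte("s3"))
	signingKey := hmacSHA256(service, []byte("aws4_request"))
	// The final signature is hex(HMAC(signingKey, stringToSign)).
	fmt.Println(hex.EncodeToString(hmacSHA256(signingKey, []byte("string-to-sign"))))
}
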
// getHashedPayload get the hexadecimal value of the SHA256 hash of the request payload // getHashedPayload get the hexadecimal value of the SHA256 hash of
// the request payload.
func getHashedPayload(req http.Request) string { func getHashedPayload(req http.Request) string {
hashedPayload := req.Header.Get("X-Amz-Content-Sha256") hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
if hashedPayload == "" { if hashedPayload == "" {
@ -109,7 +118,8 @@ func getHashedPayload(req http.Request) string {
return hashedPayload return hashedPayload
} }
// getCanonicalHeaders generate a list of request headers for signature. // getCanonicalHeaders generate a list of request headers for
// signature.
func getCanonicalHeaders(req http.Request) string { func getCanonicalHeaders(req http.Request) string {
var headers []string var headers []string
vals := make(map[string][]string) vals := make(map[string][]string)
@ -124,6 +134,8 @@ func getCanonicalHeaders(req http.Request) string {
sort.Strings(headers) sort.Strings(headers)
var buf bytes.Buffer var buf bytes.Buffer
// Save all the headers in canonical form <header>:<value> newline
// separated for each header.
for _, k := range headers { for _, k := range headers {
buf.WriteString(k) buf.WriteString(k)
buf.WriteByte(':') buf.WriteByte(':')
@ -145,12 +157,13 @@ func getCanonicalHeaders(req http.Request) string {
} }
// getSignedHeaders generate all signed request headers. // getSignedHeaders generate all signed request headers.
// i.e. alphabetically sorted, semicolon-separated list of lowercase request header names // i.e. lexically sorted, semicolon-separated list of lowercase
// request header names.
func getSignedHeaders(req http.Request) string { func getSignedHeaders(req http.Request) string {
var headers []string var headers []string
for k := range req.Header { for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header continue // Ignored header found, continue.
} }
headers = append(headers, strings.ToLower(k)) headers = append(headers, strings.ToLower(k))
} }
@ -168,7 +181,6 @@ func getSignedHeaders(req http.Request) string {
// <CanonicalHeaders>\n // <CanonicalHeaders>\n
// <SignedHeaders>\n // <SignedHeaders>\n
// <HashedPayload> // <HashedPayload>
//
func getCanonicalRequest(req http.Request) string { func getCanonicalRequest(req http.Request) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
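
A sketch of the layout that comment describes, with placeholder values joined the same way the function does:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Placeholder components; a real request derives each field from req.
	canonicalRequest := strings.Join([]string{
		"GET",                // HTTPMethod
		"/mybucket/myobject", // CanonicalURI
		"prefix=logs%2F",     // CanonicalQueryString ('+' already encoded as %20)
		"host:s3.amazonaws.com\nx-amz-date:20160107T192338Z\n", // CanonicalHeaders
		"host;x-amz-date",  // SignedHeaders
		"UNSIGNED-PAYLOAD", // HashedPayload placeholder
	}, "\n")
	fmt.Println(canonicalRequest)
}
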
@ -193,20 +205,21 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
// PreSignV4 presign the request, in accordance with // PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
// presign is a noop for anonymous credentials. // Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" { if accessKeyID == "" || secretAccessKey == "" {
return nil return &req
} }
// Initial time. // Initial time.
t := time.Now().UTC() t := time.Now().UTC()
// get credential string. // Get credential string.
credential := getCredential(accessKeyID, location, t) credential := getCredential(accessKeyID, location, t)
// Get all signed headers. // Get all signed headers.
signedHeaders := getSignedHeaders(req) signedHeaders := getSignedHeaders(req)
// set URL query. // Set URL query.
query := req.URL.Query() query := req.URL.Query()
query.Set("X-Amz-Algorithm", signV4Algorithm) query.Set("X-Amz-Algorithm", signV4Algorithm)
query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
@ -221,10 +234,10 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
// Get string to sign from canonical request. // Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest) stringToSign := getStringToSignV4(t, location, canonicalRequest)
// get hmac signing key. // Get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t) signingKey := getSigningKey(secretAccessKey, location, t)
// calculate signature. // Calculate signature.
signature := getSignature(signingKey, stringToSign) signature := getSignature(signingKey, stringToSign)
// Add signature header to RawQuery. // Add signature header to RawQuery.
@ -233,9 +246,12 @@ func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
return &req return &req
} }
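
A hypothetical usage sketch from inside the package (bucket, object, and credentials are placeholders; assumes "net/http", "log", and "fmt" are imported):

// Sketch: presign a GET for one hour.
func examplePresign() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/myobject", nil)
	if err != nil {
		log.Fatalln(err)
	}
	presigned := PreSignV4(*req, "ACCESSKEY", "SECRETKEY", "us-east-1", 3600)
	// The returned request's URL carries the X-Amz-* query parameters
	// (algorithm, date, expiry, credential, signed headers, signature).
	fmt.Println(presigned.URL.String())
}
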
// PostPresignSignatureV4 - presigned signature for PostPolicy requests. // PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signing key.
signingkey := getSigningKey(secretAccessKey, location, t) signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
signature := getSignature(signingkey, policyBase64) signature := getSignature(signingkey, policyBase64)
return signature return signature
} }
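
A hypothetical in-package sketch of signing a browser-upload POST policy; the policy JSON and credentials are placeholders (assumes "encoding/base64", "time", and "fmt" are imported):

func examplePostPolicy() {
	policy := `{"expiration":"2016-12-31T00:00:00Z","conditions":[["eq","$bucket","mybucket"]]}`
	policyBase64 := base64.StdEncoding.EncodeToString([]byte(policy))
	signature := PostPresignSignatureV4(policyBase64, time.Now().UTC(), "SECRETKEY", "us-east-1")
	fmt.Println(signature)
}
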
@ -243,6 +259,11 @@ func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
// SignV4 sign the request before Do(), in accordance with // SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time. // Initial time.
t := time.Now().UTC() t := time.Now().UTC()
@ -255,19 +276,19 @@ func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
// Get string to sign from canonical request. // Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest) stringToSign := getStringToSignV4(t, location, canonicalRequest)
// get hmac signing key. // Get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t) signingKey := getSigningKey(secretAccessKey, location, t)
// get credential string. // Get credential string.
credential := getCredential(accessKeyID, location, t) credential := getCredential(accessKeyID, location, t)
// Get all signed headers. // Get all signed headers.
signedHeaders := getSignedHeaders(req) signedHeaders := getSignedHeaders(req)
// calculate signature. // Calculate signature.
signature := getSignature(signingKey, stringToSign) signature := getSignature(signingKey, stringToSign)
// if regular request, construct the final authorization header. // If regular request, construct the final authorization header.
parts := []string{ parts := []string{
signV4Algorithm + " Credential=" + credential, signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + signedHeaders, "SignedHeaders=" + signedHeaders,

View file

@ -1,3 +1,19 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio package minio
// SignatureType is type of Authorization requested for a given HTTP request. // SignatureType is type of Authorization requested for a given HTTP request.

View file

@ -37,7 +37,7 @@ func newTempFile(prefix string) (*tempFile, error) {
} }
return &tempFile{ return &tempFile{
File: file, File: file,
mutex: new(sync.Mutex), mutex: &sync.Mutex{},
}, nil }, nil
} }

View file

@ -17,7 +17,10 @@
package minio package minio
import ( import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/xml"
"io" "io"
"io/ioutil" "io/ioutil"
"net" "net"
@ -29,6 +32,26 @@ import (
"unicode/utf8" "unicode/utf8"
) )
// xmlDecoder provide decoded value in xml.
func xmlDecoder(body io.Reader, v interface{}) error {
d := xml.NewDecoder(body)
return d.Decode(v)
}
// sum256 calculate sha256 sum for an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
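
A short sketch exercising the new helpers; the s3Error struct is illustrative, not a library type (assumes "strings", "log", "fmt", and "encoding/hex" are imported):

func exampleHelpers() {
	// xmlDecoder: unmarshal an S3-style XML error body.
	type s3Error struct {
		Code    string `xml:"Code"`
		Message string `xml:"Message"`
	}
	body := strings.NewReader(`<Error><Code>NoSuchKey</Code><Message>not found</Message></Error>`)
	var e s3Error
	if err := xmlDecoder(body, &e); err != nil {
		log.Fatalln(err)
	}
	fmt.Println(e.Code, e.Message) // NoSuchKey not found

	// sum256: hex of the SHA256 of an empty payload, the constant V4
	// signing uses for requests without a body.
	fmt.Println(hex.EncodeToString(sum256([]byte{})))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
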
// isPartUploaded - true if part is already uploaded. // isPartUploaded - true if part is already uploaded.
func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) { func isPartUploaded(objPart objectPart, objectParts map[int]objectPart) (isUploaded bool) {
_, isUploaded = objectParts[objPart.PartNumber] _, isUploaded = objectParts[objPart.PartNumber]
@ -261,7 +284,6 @@ func isValidObjectPrefix(objectPrefix string) error {
// - if input object size is -1 then return maxPartSize. // - if input object size is -1 then return maxPartSize.
// - if it happens to be that partSize is indeed bigger // - if it happens to be that partSize is indeed bigger
// than the maximum part size just return maxPartSize. // than the maximum part size just return maxPartSize.
//
func optimalPartSize(objectSize int64) int64 { func optimalPartSize(objectSize int64) int64 {
// if object size is -1 choose part size as 5GiB. // if object size is -1 choose part size as 5GiB.
if objectSize == -1 { if objectSize == -1 {
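
For intuition on the sizing rules those comments describe, rough arithmetic under S3's documented limits (10000 parts, 5 GiB max part, 5 TiB max object); the rounding is illustrative, not the function's exact algorithm:

package main

import "fmt"

func main() {
	const (
		maxParts      = 10000
		maxPartSize   = int64(5) << 30 // 5 GiB
		maxObjectSize = int64(5) << 40 // 5 TiB
	)
	// Unknown size (-1): assume the worst case and use maxPartSize.
	// Known size: objectSize/maxParts is the smallest part size that
	// still fits the object into 10000 parts.
	partSize := maxObjectSize / maxParts // ≈ 524 MiB
	if partSize > maxPartSize {
		partSize = maxPartSize
	}
	fmt.Println(partSize)
}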