Support large layer for OSS driver

Signed-off-by: Li Yi <denverdino@gmail.com>
Authored by Li Yi on 2015-10-25 11:01:15 +08:00; committed by weiyuan.yl
parent e6c60e79c5
commit 54da47d636
22 changed files with 782 additions and 840 deletions

Godeps/Godeps.json (generated)

@@ -77,11 +77,15 @@
 },
 {
 	"ImportPath": "github.com/denverdino/aliyungo/oss",
-	"Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3"
+	"Rev": "f192209bcca7a2221ea0b100bf4569f484654d43"
 },
 {
 	"ImportPath": "github.com/denverdino/aliyungo/util",
-	"Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3"
+	"Rev": "f192209bcca7a2221ea0b100bf4569f484654d43"
 },
+{
+	"ImportPath": "github.com/denverdino/aliyungo/common",
+	"Rev": "f192209bcca7a2221ea0b100bf4569f484654d43"
+},
 {
 	"ImportPath": "github.com/docker/docker/pkg/tarsum",

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015-2015 Li Yi (denverdino@gmail.com).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,145 @@
package common
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"time"
"github.com/denverdino/aliyungo/util"
)
// A Client represents a client of ECS services
type Client struct {
AccessKeyId string //Access Key Id
AccessKeySecret string //Access Key Secret
debug bool
httpClient *http.Client
endpoint string
version string
}
// Init initializes the ECS client with the given endpoint, version and credentials
func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) {
client.AccessKeyId = accessKeyId
client.AccessKeySecret = accessKeySecret + "&"
client.debug = false
client.httpClient = &http.Client{}
client.endpoint = endpoint
client.version = version
}
// SetEndpoint sets custom endpoint
func (client *Client) SetEndpoint(endpoint string) {
client.endpoint = endpoint
}
// SetVersion sets custom version
func (client *Client) SetVersion(version string) {
client.version = version
}
// SetAccessKeyId sets new AccessKeyId
func (client *Client) SetAccessKeyId(id string) {
client.AccessKeyId = id
}
// SetAccessKeySecret sets new AccessKeySecret
func (client *Client) SetAccessKeySecret(secret string) {
client.AccessKeySecret = secret + "&"
}
// SetDebug sets debug mode to log the request/response message
func (client *Client) SetDebug(debug bool) {
client.debug = debug
}
// Invoke sends the raw HTTP request for ECS services
func (client *Client) Invoke(action string, args interface{}, response interface{}) error {
request := Request{}
request.init(client.version, action, client.AccessKeyId)
query := util.ConvertToQueryValues(request)
util.SetQueryValues(args, &query)
// Sign request
signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret)
// Generate the request URL
requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature)
httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil)
// TODO move to util and add build val flag
httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version)
if err != nil {
return GetClientError(err)
}
t0 := time.Now()
httpResp, err := client.httpClient.Do(httpReq)
t1 := time.Now()
if err != nil {
return GetClientError(err)
}
statusCode := httpResp.StatusCode
if client.debug {
log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0))
}
defer httpResp.Body.Close()
body, err := ioutil.ReadAll(httpResp.Body)
if err != nil {
return GetClientError(err)
}
if client.debug {
var prettyJSON bytes.Buffer
err = json.Indent(&prettyJSON, body, "", " ")
log.Println(string(prettyJSON.Bytes()))
}
if statusCode >= 400 && statusCode <= 599 {
errorResponse := ErrorResponse{}
err = json.Unmarshal(body, &errorResponse)
ecsError := &Error{
ErrorResponse: errorResponse,
StatusCode: statusCode,
}
return ecsError
}
err = json.Unmarshal(body, response)
//log.Printf("%++v", response)
if err != nil {
return GetClientError(err)
}
return nil
}
// GenerateClientToken generates the Client Token with random string
func (client *Client) GenerateClientToken() string {
return util.CreateRandomString()
}
func GetClientErrorFromString(str string) error {
return &Error{
ErrorResponse: ErrorResponse{
Code: "AliyunGoClientFailure",
Message: str,
},
StatusCode: -1,
}
}
func GetClientError(err error) error {
return GetClientErrorFromString(err.Error())
}
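
For reference, a minimal sketch of how a caller is expected to drive the new common.Client; the DescribeRegions action name, argument struct and response fields below are illustrative placeholders, not part of this commit.

package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/common"
)

// DescribeRegionsArgs and DescribeRegionsResponse are hypothetical types
// used only to show the Invoke calling convention.
type DescribeRegionsArgs struct{}

type DescribeRegionsResponse struct {
	common.Response
	RegionIds []string // hypothetical field
}

func main() {
	// Endpoint, API version and credentials are placeholders.
	client := &common.Client{}
	client.Init("https://ecs.aliyuncs.com", "2014-05-26", "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET")
	client.SetDebug(true) // log the request URL, status and response body

	resp := &DescribeRegionsResponse{}
	if err := client.Invoke("DescribeRegions", &DescribeRegionsArgs{}, resp); err != nil {
		fmt.Println("invoke failed:", err)
		return
	}
	fmt.Println("request id:", resp.RequestId)
}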

@ -0,0 +1,18 @@
package common
// Region represents ECS region
type Region string
// Constants of region definition
const (
Hangzhou = Region("cn-hangzhou")
Qingdao = Region("cn-qingdao")
Beijing = Region("cn-beijing")
Hongkong = Region("cn-hongkong")
Shenzhen = Region("cn-shenzhen")
USWest1 = Region("us-west-1")
APSouthEast1 = Region("ap-southeast-1")
Shanghai = Region("cn-shanghai")
)
var ValidRegions = []Region{Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, USWest1, APSouthEast1}
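
A tiny helper like the one below (not part of this commit) is how ValidRegions is typically consumed, e.g. to validate a region name coming from configuration.

package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/common"
)

// regionIsValid reports whether name matches one of the package's ValidRegions.
func regionIsValid(name string) bool {
	for _, r := range common.ValidRegions {
		if r == common.Region(name) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(regionIsValid("cn-beijing"))   // true
	fmt.Println(regionIsValid("eu-central-1")) // false: not a known region
}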

@ -0,0 +1,101 @@
package common
import (
"fmt"
"log"
"time"
"github.com/denverdino/aliyungo/util"
)
// Constants for Aliyun API requests
const (
SignatureVersion = "1.0"
SignatureMethod = "HMAC-SHA1"
JSONResponseFormat = "JSON"
XMLResponseFormat = "XML"
ECSRequestMethod = "GET"
)
type Request struct {
Format string
Version string
AccessKeyId string
Signature string
SignatureMethod string
Timestamp util.ISO6801Time
SignatureVersion string
SignatureNonce string
ResourceOwnerAccount string
Action string
}
func (request *Request) init(version string, action string, AccessKeyId string) {
request.Format = JSONResponseFormat
request.Timestamp = util.NewISO6801Time(time.Now().UTC())
request.Version = version
request.SignatureVersion = SignatureVersion
request.SignatureMethod = SignatureMethod
request.SignatureNonce = util.CreateRandomString()
request.Action = action
request.AccessKeyId = AccessKeyId
}
type Response struct {
RequestId string
}
type ErrorResponse struct {
Response
HostId string
Code string
Message string
}
// An Error represents a custom error for ECS failure response
type Error struct {
ErrorResponse
StatusCode int //Status Code of HTTP Response
}
func (e *Error) Error() string {
return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message)
}
type Pagination struct {
PageNumber int
PageSize int
}
func (p *Pagination) SetPageSize(size int) {
p.PageSize = size
}
func (p *Pagination) Validate() {
if p.PageNumber < 0 {
log.Printf("Invalid PageNumber: %d", p.PageNumber)
p.PageNumber = 1
}
if p.PageSize < 0 {
log.Printf("Invalid PageSize: %d", p.PageSize)
p.PageSize = 10
} else if p.PageSize > 50 {
log.Printf("Invalid PageSize: %d", p.PageSize)
p.PageSize = 50
}
}
// A PaginationResponse represents a response with pagination information
type PaginationResult struct {
TotalCount int
PageNumber int
PageSize int
}
// NextPage gets the next page of the result set
func (r *PaginationResult) NextPage() *Pagination {
if r.PageNumber*r.PageSize >= r.TotalCount {
return nil
}
return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize}
}
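
To make the pagination contract concrete, here is a short sketch of how Pagination and PaginationResult chain together; listSomething stands in for any paginated API call and is not part of this commit.

package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/common"
)

// listSomething is a hypothetical paginated call: it returns the items of one
// page plus a PaginationResult describing the full result set (120 items here).
func listSomething(p common.Pagination) ([]string, common.PaginationResult) {
	total := 120
	count := p.PageSize
	if p.PageNumber*p.PageSize > total {
		count = total - (p.PageNumber-1)*p.PageSize
	}
	return make([]string, count), common.PaginationResult{
		TotalCount: total,
		PageNumber: p.PageNumber,
		PageSize:   p.PageSize,
	}
}

func main() {
	page := common.Pagination{PageNumber: 1, PageSize: 50}
	page.Validate() // resets out-of-range values; PageSize is capped at 50

	for {
		items, result := listSomething(page)
		fmt.Printf("page %d: %d items\n", result.PageNumber, len(items))

		next := result.NextPage() // nil once PageNumber*PageSize >= TotalCount
		if next == nil {
			break
		}
		page = *next
	}
}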

@ -0,0 +1,8 @@
package common
type InternetChargeType string
const (
PayByBandwidth = InternetChargeType("PayByBandwidth")
PayByTraffic = InternetChargeType("PayByTraffic")
)

@ -0,0 +1,3 @@
package common
const Version = "0.1"

@ -8,7 +8,6 @@ import (
"encoding/base64" "encoding/base64"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"github.com/denverdino/aliyungo/util"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@ -22,6 +21,9 @@ import (
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/denverdino/aliyungo/common"
"github.com/denverdino/aliyungo/util"
) )
const DefaultContentType = "application/octet-stream" const DefaultContentType = "application/octet-stream"
@ -34,7 +36,6 @@ type Client struct {
Internal bool Internal bool
Secure bool Secure bool
ConnectTimeout time.Duration ConnectTimeout time.Duration
ReadTimeout time.Duration
endpoint string endpoint string
debug bool debug bool
@ -168,6 +169,8 @@ func (client *Client) SetEndpoint(endpoint string) {
} }
// PutBucket creates a new bucket. // PutBucket creates a new bucket.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucket
func (b *Bucket) PutBucket(perm ACL) error { func (b *Bucket) PutBucket(perm ACL) error {
headers := make(http.Header) headers := make(http.Header)
if perm != "" { if perm != "" {
@ -185,13 +188,16 @@ func (b *Bucket) PutBucket(perm ACL) error {
// DelBucket removes an existing bucket. All objects in the bucket must // DelBucket removes an existing bucket. All objects in the bucket must
// be removed before the bucket itself can be removed. // be removed before the bucket itself can be removed.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&DeleteBucket
func (b *Bucket) DelBucket() (err error) { func (b *Bucket) DelBucket() (err error) {
req := &request{
method: "DELETE",
bucket: b.Name,
path: "/",
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "DELETE",
bucket: b.Name,
path: "/",
}
err = b.Client.query(req, nil) err = b.Client.query(req, nil)
if !shouldRetry(err) { if !shouldRetry(err) {
break break
@ -201,6 +207,8 @@ func (b *Bucket) DelBucket() (err error) {
} }
// Get retrieves an object from an bucket. // Get retrieves an object from an bucket.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&GetObject
func (b *Bucket) Get(path string) (data []byte, err error) { func (b *Bucket) Get(path string) (data []byte, err error) {
body, err := b.GetReader(path) body, err := b.GetReader(path)
if err != nil { if err != nil {
@ -237,16 +245,17 @@ func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
// It is the caller's responsibility to call Close on rc when // It is the caller's responsibility to call Close on rc when
// finished reading // finished reading
func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) {
req := &request{
bucket: b.Name,
path: path,
headers: headers,
}
err = b.Client.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
bucket: b.Name,
path: path,
headers: headers,
}
err = b.Client.prepare(req)
if err != nil {
return nil, err
}
resp, err := b.Client.run(req, nil) resp, err := b.Client.run(req, nil)
if shouldRetry(err) && attempt.HasNext() { if shouldRetry(err) && attempt.HasNext() {
continue continue
@ -271,17 +280,18 @@ func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err
} }
func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) {
req := &request{
bucket: b.Name,
path: path,
params: params,
headers: headers,
}
err = b.Client.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
bucket: b.Name,
path: path,
params: params,
headers: headers,
}
err = b.Client.prepare(req)
if err != nil {
return nil, err
}
resp, err := b.Client.run(req, nil) resp, err := b.Client.run(req, nil)
if shouldRetry(err) && attempt.HasNext() { if shouldRetry(err) && attempt.HasNext() {
continue continue
@ -296,16 +306,17 @@ func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values,
// Exists checks whether or not an object exists on an bucket using a HEAD request. // Exists checks whether or not an object exists on an bucket using a HEAD request.
func (b *Bucket) Exists(path string) (exists bool, err error) { func (b *Bucket) Exists(path string) (exists bool, err error) {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
}
err = b.Client.prepare(req)
if err != nil {
return
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
}
err = b.Client.prepare(req)
if err != nil {
return
}
resp, err := b.Client.run(req, nil) resp, err := b.Client.run(req, nil)
if shouldRetry(err) && attempt.HasNext() { if shouldRetry(err) && attempt.HasNext() {
@ -332,19 +343,22 @@ func (b *Bucket) Exists(path string) (exists bool, err error) {
} }
// Head HEADs an object in the bucket, returns the response with // Head HEADs an object in the bucket, returns the response with
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&HeadObject
func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
headers: headers,
}
err := b.Client.prepare(req)
if err != nil {
return nil, err
}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
method: "HEAD",
bucket: b.Name,
path: path,
headers: headers,
}
err := b.Client.prepare(req)
if err != nil {
return nil, err
}
resp, err := b.Client.run(req, nil) resp, err := b.Client.run(req, nil)
if shouldRetry(err) && attempt.HasNext() { if shouldRetry(err) && attempt.HasNext() {
continue continue
@ -352,18 +366,25 @@ func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return resp, err return resp, err
} }
return nil, fmt.Errorf("OSS Currently Unreachable") return nil, fmt.Errorf("OSS Currently Unreachable")
} }
// Put inserts an object into the bucket. // Put inserts an object into the bucket.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&PutObject
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
body := bytes.NewBuffer(data) body := bytes.NewBuffer(data)
return b.PutReader(path, body, int64(len(data)), contType, perm, options) return b.PutReader(path, body, int64(len(data)), contType, perm, options)
} }
// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key // PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&CopyObject
func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
headers := make(http.Header) headers := make(http.Header)
@ -376,6 +397,7 @@ func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source stri
bucket: b.Name, bucket: b.Name,
path: path, path: path,
headers: headers, headers: headers,
timeout: 5 * time.Minute,
} }
resp := &CopyObjectResult{} resp := &CopyObjectResult{}
err := b.Client.query(req, resp) err := b.Client.query(req, resp)
@ -418,8 +440,8 @@ func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options)
} }
stats, err := file.Stat() stats, err := file.Stat()
if err != nil { if err != nil {
log.Panicf("Unable to read file %s stats.", file.Name()) log.Printf("Unable to read file %s stats.\n", file.Name())
return nil return err
} }
return b.PutReader(path, file, stats.Size(), contentType, perm, options) return b.PutReader(path, file, stats.Size(), contentType, perm, options)
@ -502,6 +524,8 @@ type WebsiteConfiguration struct {
} }
// PutBucketWebsite configures a bucket as a website. // PutBucketWebsite configures a bucket as a website.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&PutBucketWebsite
func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
doc, err := xml.Marshal(configuration) doc, err := xml.Marshal(configuration)
if err != nil { if err != nil {
@ -530,6 +554,8 @@ func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length in
} }
// Del removes an object from the bucket. // Del removes an object from the bucket.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteObject
func (b *Bucket) Del(path string) error { func (b *Bucket) Del(path string) error {
req := &request{ req := &request{
method: "DELETE", method: "DELETE",
@ -550,6 +576,8 @@ type Object struct {
} }
// DelMulti removes up to 1000 objects from the bucket. // DelMulti removes up to 1000 objects from the bucket.
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/object&DeleteMultipleObjects
func (b *Bucket) DelMulti(objects Delete) error { func (b *Bucket) DelMulti(objects Delete) error {
doc, err := xml.Marshal(objects) doc, err := xml.Marshal(objects)
if err != nil { if err != nil {
@ -627,7 +655,7 @@ type Key struct {
// will return keys alphabetically greater than the marker. // will return keys alphabetically greater than the marker.
// //
// The max parameter specifies how many keys + common prefixes to return in // The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000. // the response, at most 1000. The default is 100.
// //
// For example, given these keys in a bucket: // For example, given these keys in a bucket:
// //
@ -667,6 +695,8 @@ type Key struct {
// }, // },
// } // }
// //
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucket
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
params := make(url.Values) params := make(url.Values)
params.Set("prefix", prefix) params.Set("prefix", prefix)
@ -675,12 +705,12 @@ func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp,
if max != 0 { if max != 0 {
params.Set("max-keys", strconv.FormatInt(int64(max), 10)) params.Set("max-keys", strconv.FormatInt(int64(max), 10))
} }
req := &request{
bucket: b.Name,
params: params,
}
result = &ListResp{} result = &ListResp{}
for attempt := attempts.Start(); attempt.Next(); { for attempt := attempts.Start(); attempt.Next(); {
req := &request{
bucket: b.Name,
params: params,
}
err = b.Client.query(req, result) err = b.Client.query(req, result)
if !shouldRetry(err) { if !shouldRetry(err) {
break break
@ -700,66 +730,6 @@ func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp,
return result, nil return result, nil
} }
//// The VersionsResp type holds the results of a list bucket Versions operation.
//type VersionsResp struct {
// Name string
// Prefix string
// KeyMarker string
// VersionIdMarker string
// MaxKeys int
// Delimiter string
// IsTruncated bool
// Versions []Version `xml:"Version"`
// CommonPrefixes []string `xml:">Prefix"`
//}
//// The Version type represents an object version stored in an bucket.
//type Version struct {
// Key string
// VersionId string
// IsLatest bool
// LastModified string
// // ETag gives the hex-encoded MD5 sum of the contents,
// // surrounded with double-quotes.
// ETag string
// Size int64
// Owner Owner
// StorageClass string
//}
//func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
// params := url.Values{}
// params.Set("versions", "")
// params.Set("prefix", prefix)
// params.Set("delimiter", delim)
// if len(versionIdMarker) != 0 {
// params["version-id-marker"] = []string{versionIdMarker}
// }
// if len(keyMarker) != 0 {
// params["key-marker"] = []string{keyMarker}
// }
// if max != 0 {
// params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
// }
// req := &request{
// bucket: b.Name,
// params: params,
// }
// result = &VersionsResp{}
// for attempt := attempts.Start(); attempt.Next(); {
// err = b.Client.query(req, result)
// if !shouldRetry(err) {
// break
// }
// }
// if err != nil {
// return nil, err
// }
// return result, nil
//}
type GetLocationResp struct { type GetLocationResp struct {
Location string `xml:",innerxml"` Location string `xml:",innerxml"`
} }
@ -942,6 +912,7 @@ type request struct {
baseurl string baseurl string
payload io.Reader payload io.Reader
prepared bool prepared bool
timeout time.Duration
} }
func (req *request) url() (*url.URL, error) { func (req *request) url() (*url.URL, error) {
@ -1062,6 +1033,8 @@ func (client *Client) setupHttpRequest(req *request) (*http.Request, error) {
Form: req.params, Form: req.params,
} }
hreq.Header.Set("X-SDK-Client", `AliyunGO/`+common.Version)
contentLength := req.headers.Get("Content-Length") contentLength := req.headers.Get("Content-Length")
if contentLength != "" { if contentLength != "" {
@ -1079,28 +1052,11 @@ func (client *Client) setupHttpRequest(req *request) (*http.Request, error) {
// doHttpRequest sends hreq and returns the http response from the server. // doHttpRequest sends hreq and returns the http response from the server.
// If resp is not nil, the XML data contained in the response // If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it. // body will be unmarshalled on it.
func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { func (client *Client) doHttpRequest(c *http.Client, hreq *http.Request, resp interface{}) (*http.Response, error) {
c := http.Client{
Transport: &http.Transport{
Dial: func(netw, addr string) (c net.Conn, err error) {
deadline := time.Now().Add(client.ReadTimeout)
if client.ConnectTimeout > 0 {
c, err = net.DialTimeout(netw, addr, client.ConnectTimeout)
} else {
c, err = net.Dial(netw, addr)
}
if err != nil {
return
}
if client.ReadTimeout > 0 {
err = c.SetDeadline(deadline)
}
return
},
Proxy: http.ProxyFromEnvironment,
},
}
if true {
log.Printf("%s %s ...\n", hreq.Method, hreq.URL.String())
}
hresp, err := c.Do(hreq) hresp, err := c.Do(hreq)
if err != nil { if err != nil {
return nil, err return nil, err
@ -1110,7 +1066,7 @@ func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http
contentType := hresp.Header.Get("Content-Type") contentType := hresp.Header.Get("Content-Type")
if contentType == "application/xml" || contentType == "text/xml" { if contentType == "application/xml" || contentType == "text/xml" {
dump, _ := httputil.DumpResponse(hresp, true) dump, _ := httputil.DumpResponse(hresp, true)
log.Printf("} -> %s\n", dump) log.Printf("%s\n", dump)
} else { } else {
log.Printf("Response Content-Type: %s\n", contentType) log.Printf("Response Content-Type: %s\n", contentType)
} }
@ -1143,7 +1099,25 @@ func (client *Client) run(req *request, resp interface{}) (*http.Response, error
return nil, err return nil, err
} }
return client.doHttpRequest(hreq, resp) c := &http.Client{
Transport: &http.Transport{
Dial: func(netw, addr string) (c net.Conn, err error) {
if client.ConnectTimeout > 0 {
c, err = net.DialTimeout(netw, addr, client.ConnectTimeout)
} else {
c, err = net.Dial(netw, addr)
}
if err != nil {
return
}
return
},
Proxy: http.ProxyFromEnvironment,
},
Timeout: req.timeout,
}
return client.doHttpRequest(c, hreq, resp)
} }
// Error represents an error in an operation with OSS. // Error represents an error in an operation with OSS.
@ -1186,10 +1160,21 @@ func (client *Client) buildError(r *http.Response) error {
return &err return &err
} }
type TimeoutError interface {
error
Timeout() bool // Is the error a timeout?
}
func shouldRetry(err error) bool { func shouldRetry(err error) bool {
if err == nil { if err == nil {
return false return false
} }
_, ok := err.(TimeoutError)
if ok {
return true
}
switch err { switch err {
case io.ErrUnexpectedEOF, io.EOF: case io.ErrUnexpectedEOF, io.EOF:
return true return true
@ -1245,6 +1230,8 @@ type AccessControlPolicy struct {
} }
// ACL returns ACL of bucket // ACL returns ACL of bucket
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/bucket&GetBucketAcl
func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { func (b *Bucket) ACL() (result *AccessControlPolicy, err error) {
params := make(url.Values) params := make(url.Values)
@ -1263,3 +1250,75 @@ func (b *Bucket) ACL() (result *AccessControlPolicy, err error) {
return &resp, nil return &resp, nil
} }
const minChunkSize = 5 << 20
const defaultChunkSize = 2 * minChunkSize
func (b *Bucket) GetContentLength(sourcePath string) (int64, error) {
resp, err := b.Head(sourcePath, nil)
if err != nil {
return 0, err
}
currentLength := resp.ContentLength
return currentLength, err
}
// Copy large file in the same bucket
func (b *Bucket) CopyLargeFile(sourcePath string, destPath string, contentType string, perm ACL, options Options) error {
log.Printf("Copy large file from %s to %s\n", sourcePath, destPath)
currentLength, err := b.GetContentLength(sourcePath)
if err != nil {
return err
}
multi, err := b.InitMulti(destPath, contentType, perm, options)
if err != nil {
return err
}
parts := []Part{}
defer func() {
if len(parts) > 0 {
if multi == nil {
// Parts should be empty if the multi is not initialized
panic("Unreachable")
} else {
if multi.Complete(parts) != nil {
multi.Abort()
}
}
}
}()
var start int64 = 0
var to int64 = 0
var partNumber = 0
sourcePathForCopy := b.Path(sourcePath)
for start = 0; start < currentLength; start = to {
to = start + defaultChunkSize
if to > currentLength {
to = currentLength
}
partNumber++
rangeStr := fmt.Sprintf("bytes=%d-%d", start, to-1)
_, part, err := multi.PutPartCopy(partNumber,
CopyOptions{CopySourceOptions: rangeStr},
sourcePathForCopy)
if err != nil {
return err
}
parts = append(parts, part)
}
return err
}
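
CopyLargeFile is the new building block that lets callers copy objects above the single-request size limit by chaining 10MB UploadPartCopy ranges. A rough usage sketch, with placeholder region, credentials, bucket and object keys:

package main

import (
	"log"

	"github.com/denverdino/aliyungo/oss"
)

func main() {
	// Placeholder region, credentials and bucket name.
	client := oss.NewOSSClient(oss.Beijing, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true)
	bucket := client.Bucket("my-registry-bucket")

	// Server-side copy of a large object within the same bucket, performed
	// as a multipart upload of defaultChunkSize (10MB) part copies.
	err := bucket.CopyLargeFile(
		"blobs/source-layer",      // source key (placeholder)
		"blobs/destination-layer", // destination key (placeholder)
		"application/octet-stream",
		oss.Private,
		oss.Options{},
	)
	if err != nil {
		log.Fatalf("large copy failed: %v", err)
	}
}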

@ -1,211 +0,0 @@
package oss_test
import (
"bytes"
"io/ioutil"
//"net/http"
"testing"
"time"
"github.com/denverdino/aliyungo/oss"
)
var (
//If you test on ECS, you can set the internal param to true
client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false)
)
func TestCreateBucket(t *testing.T) {
b := client.Bucket(TestBucket)
err := b.PutBucket(oss.Private)
if err != nil {
t.Errorf("Failed for PutBucket: %v", err)
}
t.Log("Wait a while for bucket creation ...")
time.Sleep(10 * time.Second)
}
func TestHead(t *testing.T) {
b := client.Bucket(TestBucket)
_, err := b.Head("name", nil)
if err == nil {
t.Errorf("Failed for Head: %v", err)
}
}
func TestPutObject(t *testing.T) {
const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\""
b := client.Bucket(TestBucket)
err := b.Put("name", []byte("content"), "content-type", oss.Private, oss.Options{ContentDisposition: DISPOSITION})
if err != nil {
t.Errorf("Failed for Put: %v", err)
}
}
func TestGet(t *testing.T) {
b := client.Bucket(TestBucket)
data, err := b.Get("name")
if err != nil || string(data) != "content" {
t.Errorf("Failed for Get: %v", err)
}
}
func TestURL(t *testing.T) {
b := client.Bucket(TestBucket)
url := b.URL("name")
t.Log("URL: ", url)
// /c.Assert(req.URL.Path, check.Equals, "/denverdino_test/name")
}
func TestGetReader(t *testing.T) {
b := client.Bucket(TestBucket)
rc, err := b.GetReader("name")
if err != nil {
t.Fatalf("Failed for GetReader: %v", err)
}
data, err := ioutil.ReadAll(rc)
rc.Close()
if err != nil || string(data) != "content" {
t.Errorf("Failed for ReadAll: %v", err)
}
}
func aTestGetNotFound(t *testing.T) {
b := client.Bucket("non-existent-bucket")
_, err := b.Get("non-existent")
if err == nil {
t.Fatalf("Failed for TestGetNotFound: %v", err)
}
ossErr, _ := err.(*oss.Error)
if ossErr.StatusCode != 404 || ossErr.BucketName != "non-existent-bucket" {
t.Errorf("Failed for TestGetNotFound: %v", err)
}
}
func TestPutCopy(t *testing.T) {
b := client.Bucket(TestBucket)
t.Log("Source: ", b.Path("name"))
res, err := b.PutCopy("newname", oss.Private, oss.CopyOptions{},
b.Path("name"))
if err == nil {
t.Logf("Copy result: %v", res)
} else {
t.Errorf("Failed for PutCopy: %v", err)
}
}
func TestList(t *testing.T) {
b := client.Bucket(TestBucket)
data, err := b.List("n", "", "", 0)
if err != nil || len(data.Contents) != 2 {
t.Errorf("Failed for List: %v", err)
} else {
t.Logf("Contents = %++v", data)
}
}
func TestListWithDelimiter(t *testing.T) {
b := client.Bucket(TestBucket)
data, err := b.List("photos/2006/", "/", "some-marker", 1000)
if err != nil || len(data.Contents) != 0 {
t.Errorf("Failed for List: %v", err)
} else {
t.Logf("Contents = %++v", data)
}
}
func TestPutReader(t *testing.T) {
b := client.Bucket(TestBucket)
buf := bytes.NewBufferString("content")
err := b.PutReader("name", buf, int64(buf.Len()), "content-type", oss.Private, oss.Options{})
if err != nil {
t.Errorf("Failed for PutReader: %v", err)
}
TestGetReader(t)
}
func TestExists(t *testing.T) {
b := client.Bucket(TestBucket)
result, err := b.Exists("name")
if err != nil || result != true {
t.Errorf("Failed for Exists: %v", err)
}
}
func TestLocation(t *testing.T) {
b := client.Bucket(TestBucket)
result, err := b.Location()
if err != nil || result != string(TestRegion) {
t.Errorf("Failed for Location: %v %s", err, result)
}
}
func TestACL(t *testing.T) {
b := client.Bucket(TestBucket)
result, err := b.ACL()
if err != nil {
t.Errorf("Failed for ACL: %v", err)
} else {
t.Logf("AccessControlPolicy: %++v", result)
}
}
func TestDelObject(t *testing.T) {
b := client.Bucket(TestBucket)
err := b.Del("name")
if err != nil {
t.Errorf("Failed for Del: %v", err)
}
}
func TestDelMultiObjects(t *testing.T) {
b := client.Bucket(TestBucket)
objects := []oss.Object{oss.Object{Key: "newname"}}
err := b.DelMulti(oss.Delete{
Quiet: false,
Objects: objects,
})
if err != nil {
t.Errorf("Failed for DelMulti: %v", err)
}
}
func TestGetService(t *testing.T) {
bucketList, err := client.GetService()
if err != nil {
t.Errorf("Unable to get service: %v", err)
} else {
t.Logf("GetService: %++v", bucketList)
}
}
func TestDelBucket(t *testing.T) {
b := client.Bucket(TestBucket)
err := b.DelBucket()
if err != nil {
t.Errorf("Failed for DelBucket: %v", err)
}
}

@ -1,14 +0,0 @@
package oss_test
import (
"github.com/denverdino/aliyungo/oss"
)
//Modify with your Access Key Id and Access Key Secret
const (
TestAccessKeyId = "MY_ACCESS_KEY_ID"
TestAccessKeySecret = "MY_ACCESS_KEY_ID"
TestIAmRich = false
TestRegion = oss.Beijing
TestBucket = "denverdino"
)

@ -8,6 +8,7 @@ import (
"encoding/xml" "encoding/xml"
"errors" "errors"
"io" "io"
"time"
//"log" //"log"
"net/http" "net/http"
"net/url" "net/url"
@ -106,6 +107,8 @@ func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi,
// InitMulti initializes a new multipart upload at the provided // InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it. // key inside b and returns a value for manipulating it.
// //
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&InitiateMultipartUpload
func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
headers := make(http.Header) headers := make(http.Header)
headers.Set("Content-Length", "0") headers.Set("Content-Length", "0")
@ -138,6 +141,8 @@ func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Option
return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
} }
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPartCopy
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
// TODO source format a /BUCKET/PATH/TO/OBJECT // TODO source format a /BUCKET/PATH/TO/OBJECT
// TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) // TODO not a good design. API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path)
@ -187,15 +192,25 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj
// PutPart sends part n of the multipart upload, reading all the content from r. // PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size. // Each part, except for the last one, must be at least 5MB in size.
// //
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&UploadPart
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r) partSize, _, md5b64, err := seekerInfo(r)
if err != nil { if err != nil {
return Part{}, err return Part{}, err
} }
return m.putPart(n, r, partSize, md5b64) return m.putPart(n, r, partSize, md5b64, 0)
} }
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { func (m *Multi) PutPartWithTimeout(n int, r io.ReadSeeker, timeout time.Duration) (Part, error) {
partSize, _, md5b64, err := seekerInfo(r)
if err != nil {
return Part{}, err
}
return m.putPart(n, r, partSize, md5b64, timeout)
}
func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string, timeout time.Duration) (Part, error) {
headers := make(http.Header) headers := make(http.Header)
headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) headers.Set("Content-Length", strconv.FormatInt(partSize, 10))
headers.Set("Content-MD5", md5b64) headers.Set("Content-MD5", md5b64)
@ -216,6 +231,7 @@ func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (
headers: headers, headers: headers,
params: params, params: params,
payload: r, payload: r,
timeout: timeout,
} }
err = m.Bucket.Client.prepare(req) err = m.Bucket.Client.prepare(req)
if err != nil { if err != nil {
@ -370,7 +386,7 @@ NextSection:
} }
// Part wasn't found or doesn't match. Send it. // Part wasn't found or doesn't match. Send it.
part, err := m.putPart(current, section, partSize, md5b64) part, err := m.putPart(current, section, partSize, md5b64, 0)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -443,6 +459,8 @@ func (m *Multi) Complete(parts []Part) error {
// handled internally, but it's not clear what happens precisely (Is an // handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?). // error returned? Is the issue completely undetectable?).
// //
//
// You can read doc at http://docs.aliyun.com/#/pub/oss/api-reference/multipart-upload&AbortMultipartUpload
func (m *Multi) Abort() error { func (m *Multi) Abort() error {
params := make(url.Values) params := make(url.Values)
params.Set("uploadId", m.UploadId) params.Set("uploadId", m.UploadId)

@ -1,161 +0,0 @@
package oss_test
import (
//"encoding/xml"
"github.com/denverdino/aliyungo/oss"
"testing"
//"io"
//"io/ioutil"
"strings"
)
func TestCreateBucketMulti(t *testing.T) {
TestCreateBucket(t)
}
func TestInitMulti(t *testing.T) {
b := client.Bucket(TestBucket)
metadata := make(map[string][]string)
metadata["key1"] = []string{"value1"}
metadata["key2"] = []string{"value2"}
options := oss.Options{
ServerSideEncryption: true,
Meta: metadata,
ContentEncoding: "text/utf8",
CacheControl: "no-cache",
ContentMD5: "0000000000000000",
}
multi, err := b.InitMulti("multi", "text/plain", oss.Private, options)
if err != nil {
t.Errorf("Failed for InitMulti: %v", err)
} else {
t.Logf("InitMulti result: %++v", multi)
}
}
func TestMultiReturnOld(t *testing.T) {
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Errorf("Failed for Multi: %v", err)
} else {
t.Logf("Multi result: %++v", multi)
}
}
func TestPutPart(t *testing.T) {
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Fatalf("Failed for Multi: %v", err)
}
part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
if err != nil {
t.Errorf("Failed for PutPart: %v", err)
} else {
t.Logf("PutPart result: %++v", part)
}
}
func TestPutPartCopy(t *testing.T) {
TestPutObject(t)
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Fatalf("Failed for Multi: %v", err)
}
res, part, err := multi.PutPartCopy(2, oss.CopyOptions{}, b.Path("name"))
if err != nil {
t.Errorf("Failed for PutPartCopy: %v", err)
} else {
t.Logf("PutPartCopy result: %++v %++v", part, res)
}
TestDelObject(t)
}
func TestListParts(t *testing.T) {
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Fatalf("Failed for Multi: %v", err)
}
parts, err := multi.ListParts()
if err != nil {
t.Errorf("Failed for ListParts: %v", err)
} else {
t.Logf("ListParts result: %++v", parts)
}
}
func TestListMulti(t *testing.T) {
b := client.Bucket(TestBucket)
multis, prefixes, err := b.ListMulti("", "/")
if err != nil {
t.Errorf("Failed for ListMulti: %v", err)
} else {
t.Logf("ListMulti result : %++v %++v", multis, prefixes)
}
}
func TestMultiAbort(t *testing.T) {
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Fatalf("Failed for Multi: %v", err)
}
err = multi.Abort()
if err != nil {
t.Errorf("Failed for Abort: %v", err)
}
}
func TestPutAll(t *testing.T) {
TestInitMulti(t)
// Don't retry the NoSuchUpload error.
b := client.Bucket(TestBucket)
multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
if err != nil {
t.Fatalf("Failed for Multi: %v", err)
}
// Must send at least one part, so that completing it will work.
parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
if err != nil {
t.Errorf("Failed for PutAll: %v", err)
} else {
t.Logf("PutAll result: %++v", parts)
}
// // Must send at least one part, so that completing it will work.
// err = multi.Complete(parts)
// if err != nil {
// t.Errorf("Failed for Complete: %v", err)
// }
err = multi.Abort()
if err != nil {
t.Errorf("Failed for Abort: %v", err)
}
}
func TestCleanUp(t *testing.T) {
TestDelBucket(t)
}

@ -9,12 +9,15 @@ type Region string
// Constants of region definition // Constants of region definition
const ( const (
Hangzhou = Region("oss-cn-hangzhou") Hangzhou = Region("oss-cn-hangzhou")
Qingdao = Region("oss-cn-qingdao") Qingdao = Region("oss-cn-qingdao")
Beijing = Region("oss-cn-beijing") Beijing = Region("oss-cn-beijing")
Hongkong = Region("oss-cn-hongkong") Hongkong = Region("oss-cn-hongkong")
Shenzhen = Region("oss-cn-shenzhen") Shenzhen = Region("oss-cn-shenzhen")
USWest1 = Region("oss-us-west-1") USWest1 = Region("oss-us-west-1")
APSouthEast1 = Region("oss-ap-southeast-1")
Shanghai = Region("oss-cn-shanghai")
DefaultRegion = Hangzhou DefaultRegion = Hangzhou
) )

@ -1,90 +0,0 @@
package util
import (
"testing"
"time"
)
func TestAttemptTiming(t *testing.T) {
testAttempt := AttemptStrategy{
Total: 0.25e9,
Delay: 0.1e9,
}
want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
t0 := time.Now()
for a := testAttempt.Start(); a.Next(); {
got = append(got, time.Now().Sub(t0))
}
got = append(got, time.Now().Sub(t0))
if len(got) != len(want) {
t.Fatalf("Failed!")
}
const margin = 0.01e9
for i, got := range want {
lo := want[i] - margin
hi := want[i] + margin
if got < lo || got > hi {
t.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
}
}
}
func TestAttemptNextHasNext(t *testing.T) {
a := AttemptStrategy{}.Start()
if !a.Next() {
t.Fatalf("Failed!")
}
if a.Next() {
t.Fatalf("Failed!")
}
a = AttemptStrategy{}.Start()
if !a.Next() {
t.Fatalf("Failed!")
}
if a.HasNext() {
t.Fatalf("Failed!")
}
if a.Next() {
t.Fatalf("Failed!")
}
a = AttemptStrategy{Total: 2e8}.Start()
if !a.Next() {
t.Fatalf("Failed!")
}
if !a.HasNext() {
t.Fatalf("Failed!")
}
time.Sleep(2e8)
if !a.HasNext() {
t.Fatalf("Failed!")
}
if !a.Next() {
t.Fatalf("Failed!")
}
if a.Next() {
t.Fatalf("Failed!")
}
a = AttemptStrategy{Total: 1e8, Min: 2}.Start()
time.Sleep(1e8)
if !a.Next() {
t.Fatalf("Failed!")
}
if !a.HasNext() {
t.Fatalf("Failed!")
}
if !a.Next() {
t.Fatalf("Failed!")
}
if a.HasNext() {
t.Fatalf("Failed!")
}
if a.Next() {
t.Fatalf("Failed!")
}
}

@ -23,13 +23,24 @@ func SetQueryValues(ifc interface{}, values *url.Values) {
} }
func setQueryValues(i interface{}, values *url.Values, prefix string) { func setQueryValues(i interface{}, values *url.Values, prefix string) {
// add to support url.Values
mapValues, ok := i.(url.Values)
if ok {
for k, _ := range mapValues {
values.Set(k, mapValues.Get(k))
}
return
}
elem := reflect.ValueOf(i) elem := reflect.ValueOf(i)
if elem.Kind() == reflect.Ptr { if elem.Kind() == reflect.Ptr {
elem = elem.Elem() elem = elem.Elem()
} }
elemType := elem.Type() elemType := elem.Type()
for i := 0; i < elem.NumField(); i++ { for i := 0; i < elem.NumField(); i++ {
fieldName := elemType.Field(i).Name fieldName := elemType.Field(i).Name
anonymous := elemType.Field(i).Anonymous
field := elem.Field(i) field := elem.Field(i)
// TODO Use Tag for validation // TODO Use Tag for validation
// tag := typ.Field(i).Tag.Get("tagname") // tag := typ.Field(i).Tag.Get("tagname")
@ -62,6 +73,19 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) {
value = strconv.FormatBool(field.Bool()) value = strconv.FormatBool(field.Bool())
case reflect.String: case reflect.String:
value = field.String() value = field.String()
case reflect.Map:
ifc := field.Interface()
m := ifc.(map[string]string)
if m != nil {
j := 0
for k, v := range m {
j++
keyName := fmt.Sprintf("%s.%d.Key", fieldName, j)
values.Set(keyName, k)
valueName := fmt.Sprintf("%s.%d.Value", fieldName, j)
values.Set(valueName, v)
}
}
case reflect.Slice: case reflect.Slice:
switch field.Type().Elem().Kind() { switch field.Type().Elem().Kind() {
case reflect.Uint8: case reflect.Uint8:
@ -85,7 +109,7 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) {
for j := 0; j < l; j++ { for j := 0; j < l; j++ {
prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1))
ifc := field.Index(j).Interface() ifc := field.Index(j).Interface()
log.Printf("%s : %v", prefixName, ifc) //log.Printf("%s : %v", prefixName, ifc)
if ifc != nil { if ifc != nil {
setQueryValues(ifc, values, prefixName) setQueryValues(ifc, values, prefixName)
} }
@ -104,7 +128,12 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) {
default: default:
ifc := field.Interface() ifc := field.Interface()
if ifc != nil { if ifc != nil {
SetQueryValues(ifc, values) if anonymous {
SetQueryValues(ifc, values)
} else {
prefixName := fieldName + "."
setQueryValues(ifc, values, prefixName)
}
continue continue
} }
} }
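
A small sketch of the query encoding the extended setQueryValues should now produce; the DescribeArgs, Tag and Filter names are invented for illustration and are not part of the change.

package main

import (
	"fmt"

	"github.com/denverdino/aliyungo/util"
)

// Hypothetical request type exercising the new map and nested-struct cases.
type Filter struct {
	Name string
}

type DescribeArgs struct {
	Action string
	Tag    map[string]string // flattened as Tag.1.Key / Tag.1.Value, ...
	Filter Filter            // non-anonymous struct: prefixed as Filter.Name
}

func main() {
	args := DescribeArgs{
		Action: "DescribeSomething",
		Tag:    map[string]string{"env": "prod"},
		Filter: Filter{Name: "demo"},
	}
	// Expected (keys sorted by Encode):
	// Action=DescribeSomething&Filter.Name=demo&Tag.1.Key=env&Tag.1.Value=prod
	fmt.Println(util.ConvertToQueryValues(&args).Encode())
}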

@ -1,52 +0,0 @@
package util
import (
"testing"
"time"
)
type TestString string
type SubStruct struct {
A string
B int
}
type TestStruct struct {
Format string
Version string
AccessKeyId string
Timestamp time.Time
Empty string
IntValue int `ArgName:"int-value"`
BoolPtr *bool `ArgName:"bool-ptr"`
IntPtr *int `ArgName:"int-ptr"`
StringArray []string `ArgName:"str-array"`
StructArray []SubStruct
test TestString
tests []TestString
}
func TestConvertToQueryValues(t *testing.T) {
boolValue := true
request := TestStruct{
Format: "JSON",
Version: "1.0",
Timestamp: time.Date(2015, time.Month(5), 26, 1, 2, 3, 4, time.UTC),
IntValue: 10,
BoolPtr: &boolValue,
StringArray: []string{"abc", "xyz"},
StructArray: []SubStruct{
SubStruct{A: "a", B: 1},
SubStruct{A: "x", B: 2},
},
test: TestString("test"),
tests: []TestString{TestString("test1"), TestString("test2")},
}
result := ConvertToQueryValues(&request).Encode()
const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D&test=test&tests=%5B%22test1%22%2C%22test2%22%5D"
if result != expectedResult {
t.Error("Incorrect encoding: ", result)
}
}

@ -2,6 +2,7 @@ package util
import ( import (
"fmt" "fmt"
"strconv"
"time" "time"
) )
@ -13,6 +14,8 @@ func GetISO8601TimeStamp(ts time.Time) string {
const formatISO8601 = "2006-01-02T15:04:05Z" const formatISO8601 = "2006-01-02T15:04:05Z"
const jsonFormatISO8601 = `"` + formatISO8601 + `"` const jsonFormatISO8601 = `"` + formatISO8601 + `"`
const formatISO8601withoutSeconds = "2006-01-02T15:04Z"
const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"`
// A ISO6801Time represents a time in ISO8601 format // A ISO6801Time represents a time in ISO8601 format
type ISO6801Time time.Time type ISO6801Time time.Time
@ -46,10 +49,25 @@ func (it ISO6801Time) MarshalJSON() ([]byte, error) {
// UnmarshalJSON deserializes the ISO6801Time from JSON string // UnmarshalJSON deserializes the ISO6801Time from JSON string
func (it *ISO6801Time) UnmarshalJSON(data []byte) error { func (it *ISO6801Time) UnmarshalJSON(data []byte) error {
if string(data) == "\"\"" { str := string(data)
if str == "\"\"" || len(data) == 0 {
return nil return nil
} }
t, err := time.ParseInLocation(jsonFormatISO8601, string(data), time.UTC) var t time.Time
var err error
if str[0] == '"' {
t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC)
if err != nil {
t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC)
}
} else {
var i int64
i, err = strconv.ParseInt(str, 10, 64)
if err == nil {
t = time.Unix(i/1000, i%1000)
}
}
if err == nil { if err == nil {
*it = ISO6801Time(t) *it = ISO6801Time(t)
} }
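
The relaxed UnmarshalJSON above now tolerates three encodings: the full ISO8601 string, the seconds-less variant, and a bare millisecond timestamp. A quick sketch (the event struct is illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/denverdino/aliyungo/util"
)

type event struct {
	When util.ISO6801Time
}

func main() {
	for _, payload := range []string{
		`{"When": "2015-10-25T11:01:15Z"}`, // full ISO8601
		`{"When": "2015-10-25T11:01Z"}`,    // without seconds
		`{"When": 1445770875000}`,          // milliseconds since the epoch
	} {
		var e event
		if err := json.Unmarshal([]byte(payload), &e); err != nil {
			fmt.Println("unmarshal failed:", err)
			continue
		}
		fmt.Println(e.When.String())
	}
}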

@@ -1,50 +0,0 @@
package util

import (
    "encoding/json"
    "testing"
    "time"
)

func TestISO8601Time(t *testing.T) {
    now := NewISO6801Time(time.Now().UTC())

    data, err := json.Marshal(now)
    if err != nil {
        t.Fatal(err)
    }

    _, err = time.Parse(`"`+formatISO8601+`"`, string(data))
    if err != nil {
        t.Fatal(err)
    }

    var now2 ISO6801Time
    err = json.Unmarshal(data, &now2)
    if err != nil {
        t.Fatal(err)
    }

    if now != now2 {
        t.Errorf("Time %s does not equal expected %s", now2, now)
    }

    if now.String() != now2.String() {
        t.Fatalf("String format for %s does not equal expected %s", now2, now)
    }

    type TestTimeStruct struct {
        A int
        B *ISO6801Time
    }
    var testValue TestTimeStruct
    err = json.Unmarshal([]byte("{\"A\": 1, \"B\":\"\"}"), &testValue)
    if err != nil {
        t.Fatal(err)
    }
    t.Logf("%v", testValue)
    if !testValue.B.IsDefault() {
        t.Fatal("Invaid Unmarshal result for ISO6801Time from empty value")
    }
    t.Logf("ISO6801Time String(): %s", now2.String())
}

View file

@@ -1,14 +0,0 @@
package util

import (
    "testing"
)

func TestCreateSignature(t *testing.T) {
    str := "GET&%2F&AccessKeyId%3Dtestid%26Action%3DDescribeRegions%26Format%3DXML%26RegionId%3Dregion1%26SignatureMethod%3DHMAC-SHA1%26SignatureNonce%3DNwDAxvLU6tFE0DVb%26SignatureVersion%3D1.0%26TimeStamp%3D2012-12-26T10%253A33%253A56Z%26Version%3D2014-05-26"

    signature := CreateSignature(str, "testsecret")
    t.Log(signature)
}
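The removed test above exercises CreateSignature over an RPC-style string-to-sign. For context, Aliyun request signing of this shape is a base64-encoded HMAC-SHA1, conventionally keyed with the access key secret plus a trailing "&". The sketch below illustrates that shape only; it is not the package's exact implementation.

package main

import (
    "crypto/hmac"
    "crypto/sha1"
    "encoding/base64"
    "fmt"
)

// signStringToSign computes base64(HMAC-SHA1(secret+"&", stringToSign)),
// the kind of signature the removed test exercised.
func signStringToSign(stringToSign, secret string) string {
    mac := hmac.New(sha1.New, []byte(secret+"&"))
    mac.Write([]byte(stringToSign))
    return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
    fmt.Println(signStringToSign("GET&%2F&AccessKeyId%3Dtestid", "testsecret"))
}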

View file

@@ -8,18 +8,31 @@ import (
     "net/http"
     "net/url"
     "sort"
-    "strconv"
     "time"
 )
+const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
 //CreateRandomString create random string
 func CreateRandomString() string {
-    rand.Seed(time.Now().UnixNano())
-    randInt := rand.Int63()
-    randStr := strconv.FormatInt(randInt, 36)
-    return randStr
+    b := make([]byte, 32)
+    l := len(dictionary)
+    _, err := srand.Read(b)
+    if err != nil {
+        // fail back to insecure rand
+        rand.Seed(time.Now().UnixNano())
+        for i := range b {
+            b[i] = dictionary[rand.Int()%l]
+        }
+    } else {
+        for i, v := range b {
+            b[i] = dictionary[v%byte(l)]
+        }
+    }
+    return string(b)
 }
 // Encode encodes the values into ``URL encoded'' form
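The new CreateRandomString prefers a cryptographically secure source: it reads 32 random bytes and maps each one into the dictionary, falling back to time-seeded math/rand only if the secure read fails (srand above is assumed to alias crypto/rand). A self-contained sketch of the same pattern:

package main

import (
    srand "crypto/rand"
    "fmt"
    "math/rand"
    "time"
)

const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

// randomString prefers crypto/rand and only falls back to the insecure
// math/rand source when the secure read fails.
func randomString(n int) string {
    b := make([]byte, n)
    if _, err := srand.Read(b); err != nil {
        rand.Seed(time.Now().UnixNano())
        for i := range b {
            b[i] = dictionary[rand.Intn(len(dictionary))]
        }
        return string(b)
    }
    for i, v := range b {
        b[i] = dictionary[int(v)%len(dictionary)]
    }
    return string(b)
}

func main() {
    fmt.Println(randomString(32))
}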

View file

@ -1,43 +0,0 @@
package util
import (
"testing"
)
func TestGenerateRandomECSPassword(t *testing.T) {
for i := 0; i < 10; i++ {
s := GenerateRandomECSPassword()
if len(s) < 8 || len(s) > 30 {
t.Errorf("Generated ECS password [%v]: bad len", s)
}
hasDigit := false
hasLower := false
hasUpper := false
for j := range s {
switch {
case '0' <= s[j] && s[j] <= '9':
hasDigit = true
case 'a' <= s[j] && s[j] <= 'z':
hasLower = true
case 'A' <= s[j] && s[j] <= 'Z':
hasUpper = true
}
}
if !hasDigit {
t.Errorf("Generated ECS password [%v]: no digit", s)
}
if !hasLower {
t.Errorf("Generated ECS password [%v]: no lower letter ", s)
}
if !hasUpper {
t.Errorf("Generated ECS password [%v]: no upper letter", s)
}
}
}

View file

@@ -39,6 +39,7 @@ const driverName = "oss"
 const minChunkSize = 5 << 20
 const defaultChunkSize = 2 * minChunkSize
+const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk
 // listMax is the largest amount of objects you can request from OSS in a list call
 const listMax = 1000
@@ -195,13 +196,14 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
     return New(params)
 }
-// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
+// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
 // bucketName
 func New(params DriverParameters) (*Driver, error) {
     client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
     client.SetEndpoint(params.Endpoint)
     bucket := client.Bucket(params.Bucket)
+    client.SetDebug(false)
     // Validate that the given credentials have at least read permissions in the
     // given bucket scope.
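The constructor now silences SDK debug output with SetDebug(false) right after building the bucket handle. A hypothetical helper showing the same client wiring; the oss calls are the ones used above, while the parameter types and the endpoint guard are assumptions for illustration.

package ossdriver

import "github.com/denverdino/aliyungo/oss"

// newBucket mirrors the constructor body above: build a client for the
// region, point it at a custom endpoint if one is given, quiet the SDK's
// debug logging, and return a handle to the configured bucket.
func newBucket(region oss.Region, internal bool, accessKeyID, accessKeySecret string, secure bool, endpoint, bucketName string) *oss.Bucket {
    client := oss.NewOSSClient(region, internal, accessKeyID, accessKeySecret, secure)
    if endpoint != "" {
        client.SetEndpoint(endpoint)
    }
    client.SetDebug(false)
    return client.Bucket(bucketName)
}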
@@ -403,35 +405,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea
 var err error
 var part oss.Part
-loop:
-    for retries := 0; retries < 5; retries++ {
-        part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]))
-        if err == nil {
-            break // success!
-        }
-        // NOTE(stevvooe): This retry code tries to only retry under
-        // conditions where the OSS package does not. We may add oss
-        // error codes to the below if we see others bubble up in the
-        // application. Right now, the most troubling is
-        // RequestTimeout, which seems to only triggered when a tcp
-        // connection to OSS slows to a crawl. If the RequestTimeout
-        // ends up getting added to the OSS library and we don't see
-        // other errors, this retry loop can be removed.
-        switch err := err.(type) {
-        case *oss.Error:
-            switch err.Code {
-            case "RequestTimeout":
-                // allow retries on only this error.
-            default:
-                break loop
-            }
-        }
-        backoff := 100 * time.Millisecond * time.Duration(retries+1)
-        logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String())
-        time.Sleep(backoff)
-    }
+    part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout)
 if err != nil {
     logrus.Errorf("error putting part, aborting: %v", err)
@@ -456,7 +430,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea
 if offset > 0 {
     resp, err := d.Bucket.Head(d.ossPath(path), nil)
     if err != nil {
-        if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" {
+        if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != 404 {
             return 0, err
         }
     }
@@ -511,7 +485,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea
 fromZeroFillLarge := func(from, to int64) error {
     bytesRead64 := int64(0)
     for to-(from+bytesRead64) >= d.ChunkSize {
-        part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros))
+        part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout)
         if err != nil {
             return err
         }
@@ -553,7 +527,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea
         return totalRead, err
     }
-    part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf))
+    part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout)
     if err != nil {
         return totalRead, err
     }
@@ -706,15 +680,14 @@ func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
 // Move moves an object stored at sourcePath to destPath, removing the original
 // object.
 func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
-    logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath))
-    /* This is terrible, but aws doesn't have an actual move. */
-    _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(),
-        oss.CopyOptions{
-            //Options: d.getOptions(),
-            //ContentType: d.getContentType()
-        },
-        d.Bucket.Path(d.ossPath(sourcePath)))
+    logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))
+    err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath),
+        d.getContentType(),
+        getPermissions(),
+        oss.Options{})
     if err != nil {
+        logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
         return parseError(sourcePath, err)
     }
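Move is now a server-side copy through CopyLargeFile, which can handle objects above the single-request copy limit, with the source key removed elsewhere in the function. A rough sketch of that copy-then-delete shape; the Del call is an assumption based on the goamz-style bucket API this SDK follows, not code from this commit.

package ossdriver

import "github.com/denverdino/aliyungo/oss"

// moveObject is illustrative only: copy the object server side, then
// remove the source key.
func moveObject(bucket *oss.Bucket, srcKey, dstKey, contentType string, perm oss.ACL, opts oss.Options) error {
    if err := bucket.CopyLargeFile(srcKey, dstKey, contentType, perm, opts); err != nil {
        return err
    }
    return bucket.Del(srcKey)
}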
@@ -756,13 +729,12 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
 method, ok := options["method"]
 if ok {
     methodString, ok = method.(string)
-    if !ok || (methodString != "GET" && methodString != "HEAD") {
-        return "", storagedriver.ErrUnsupportedMethod{}
+    if !ok || (methodString != "GET" && methodString != "PUT") {
+        return "", storagedriver.ErrUnsupportedMethod{driverName}
     }
 }
 expiresTime := time.Now().Add(20 * time.Minute)
-logrus.Infof("expiresTime: %d", expiresTime)
 expires, ok := options["expiry"]
 if ok {
@@ -771,7 +743,7 @@ func (d *driver) URLFor(ctx context.Context, path string, options map[string]int
         expiresTime = et
     }
 }
-logrus.Infof("expiresTime: %d", expiresTime)
+logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
 testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
 logrus.Infof("testURL: %s", testURL)
 return testURL, nil
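With the accepted methods changed from GET/HEAD to GET/PUT, callers can now ask the driver for presigned upload URLs as well as download URLs. A hypothetical helper built on the storagedriver interface; the option keys "method" and "expiry" are the ones URLFor reads above, and everything else here is illustrative.

package ossdriver

import (
    "time"

    "github.com/docker/distribution/context"
    storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// presignedUploadURL asks any StorageDriver (such as the OSS driver) for a
// signed PUT URL that stays valid for one hour. A non-GET/PUT method would
// come back as storagedriver.ErrUnsupportedMethod.
func presignedUploadURL(ctx context.Context, d storagedriver.StorageDriver, path string) (string, error) {
    return d.URLFor(ctx, path, map[string]interface{}{
        "method": "PUT",
        "expiry": time.Now().Add(1 * time.Hour),
    })
}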
@@ -781,11 +753,6 @@ func (d *driver) ossPath(path string) string {
     return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
 }
-// S3BucketKey returns the OSS bucket key for the given storage driver path.
-func (d *Driver) S3BucketKey(path string) string {
-    return d.StorageDriver.(*driver).ossPath(path)
-}
 func parseError(path string, err error) error {
     if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" {
         return storagedriver.PathNotFoundError{Path: path}