diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 17428002..d21f5260 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -44,6 +44,14 @@
 			"Comment": "1.2.0-66-g6086d79",
 			"Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1"
 		},
+		{
+			"ImportPath": "github.com/denverdino/aliyungo/oss",
+			"Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb"
+		},
+		{
+			"ImportPath": "github.com/denverdino/aliyungo/util",
+			"Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb"
+		},
 		{
 			"ImportPath": "github.com/docker/docker/pkg/tarsum",
 			"Comment": "v1.4.1-3932-gb63ec6e",
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go
new file mode 100644
index 00000000..8e38ea4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go
@@ -0,0 +1,1252 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"github.com/denverdino/aliyungo/util"
+	"io"
+	"io/ioutil"
+	"log"
+	"mime"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const DefaultContentType = "application/octet-stream"
+
+// The Client type encapsulates operations with an OSS region.
+type Client struct {
+	AccessKeyId     string
+	AccessKeySecret string
+	Region          Region
+	Internal        bool
+	Secure          bool
+	ConnectTimeout  time.Duration
+	ReadTimeout     time.Duration
+	debug           bool
+}
+
+// The Bucket type encapsulates operations on a bucket.
+type Bucket struct {
+	*Client
+	Name string
+}
+
+// The Owner type represents the owner of an object in a bucket.
+type Owner struct {
+	ID          string
+	DisplayName string
+}
+
+// Options carries optional headers and metadata for object operations.
+type Options struct {
+	ServerSideEncryption bool
+	Meta                 map[string][]string
+	ContentEncoding      string
+	CacheControl         string
+	ContentMD5           string
+	ContentDisposition   string
+	//Range              string
+	//Expires int
+}
+
+type CopyOptions struct {
+	Headers           http.Header
+	CopySourceOptions string
+	MetadataDirective string
+	//ContentType       string
+}
+
+// CopyObjectResult is the output from a Copy request
+type CopyObjectResult struct {
+	ETag         string
+	LastModified string
+}
+
+var attempts = util.AttemptStrategy{
+	Min:   5,
+	Total: 5 * time.Second,
+	Delay: 200 * time.Millisecond,
+}
+
+// NewOSSClient creates a new OSS Client.
+func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client {
+	return &Client{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+		Region:          region,
+		Internal:        internal,
+		debug:           false,
+		Secure:          secure,
+	}
+}
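+
+// Illustrative usage sketch (not part of the vendored source; the region,
+// credentials, and bucket name below are placeholders):
+//
+//	client := NewOSSClient(Beijing, false, "MY_ACCESS_KEY_ID", "MY_ACCESS_KEY_SECRET", true)
+//	bucket := client.Bucket("my-bucket")
+//	err := bucket.Put("hello.txt", []byte("hello"), "text/plain", Private, Options{})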
+
+// SetDebug sets debug mode to log the request/response message
+func (client *Client) SetDebug(debug bool) {
+	client.debug = debug
+}
+
+// Bucket returns a Bucket with the given name.
+func (client *Client) Bucket(name string) *Bucket {
+	name = strings.ToLower(name)
+	return &Bucket{
+		Client: client,
+		Name:   name,
+	}
+}
+
+type BucketInfo struct {
+	Name         string
+	CreationDate string
+}
+
+type GetServiceResp struct {
+	Owner   Owner
+	Buckets []BucketInfo `xml:">Bucket"`
+}
+
+// GetService gets a list of all buckets owned by an account.
+func (client *Client) GetService() (*GetServiceResp, error) {
+	bucket := client.Bucket("")
+
+	r, err := bucket.Get("")
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the XML response.
+	var resp GetServiceResp
+	if err = xml.Unmarshal(r, &resp); err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+}
+
+type ACL string
+
+const (
+	Private           = ACL("private")
+	PublicRead        = ACL("public-read")
+	PublicReadWrite   = ACL("public-read-write")
+	AuthenticatedRead = ACL("authenticated-read")
+	BucketOwnerRead   = ACL("bucket-owner-read")
+	BucketOwnerFull   = ACL("bucket-owner-full-control")
+)
+
+var createBucketConfiguration = `<CreateBucketConfiguration>
+  <LocationConstraint>%s</LocationConstraint>
+</CreateBucketConfiguration>`
+
+// locationConstraint returns an io.Reader specifying a LocationConstraint if
+// required for the region.
+func (client *Client) locationConstraint() io.Reader {
+	constraint := fmt.Sprintf(createBucketConfiguration, client.Region)
+	return strings.NewReader(constraint)
+}
+
+// PutBucket creates a new bucket.
+func (b *Bucket) PutBucket(perm ACL) error {
+	headers := make(http.Header)
+	if perm != "" {
+		headers.Set("x-oss-acl", string(perm))
+	}
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    "/",
+		headers: headers,
+		payload: b.Client.locationConstraint(),
+	}
+	return b.Client.query(req, nil)
+}
+
+// DelBucket removes an existing bucket. All objects in the bucket must
+// be removed before the bucket itself can be removed.
+func (b *Bucket) DelBucket() (err error) {
+	req := &request{
+		method: "DELETE",
+		bucket: b.Name,
+		path:   "/",
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		err = b.Client.query(req, nil)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	return err
+}
+
+// Get retrieves an object from a bucket.
+func (b *Bucket) Get(path string) (data []byte, err error) {
+	body, err := b.GetReader(path)
+	if err != nil {
+		return nil, err
+	}
+	data, err = ioutil.ReadAll(body)
+	body.Close()
+	return data, err
+}
+
+// GetReader retrieves an object from a bucket,
+// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading.
+func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
+	resp, err := b.GetResponse(path)
+	if resp != nil {
+		return resp.Body, err
+	}
+	return nil, err
+}
+
+// GetResponse retrieves an object from a bucket,
+// returning the HTTP response.
+// It is the caller's responsibility to call Close on resp.Body when
+// finished reading.
+func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
+	return b.GetResponseWithHeaders(path, make(http.Header))
+}
+
+// GetResponseWithHeaders retrieves an object from a bucket,
+// sending the given custom headers with the request,
+// and returns the HTTP response.
+// It is the caller's responsibility to call Close on resp.Body when
+// finished reading.
+func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) {
+	req := &request{
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	err = b.Client.prepare(req)
+	if err != nil {
+		return nil, err
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		resp, err := b.Client.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return resp, nil
+	}
+	panic("unreachable")
+}
+
+// GetWithParams retrieves an object from a bucket using the given query parameters.
+func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) {
+	resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+	data, err = ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	return data, err
+}
+
+func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) {
+	req := &request{
+		bucket:  b.Name,
+		path:    path,
+		params:  params,
+		headers: headers,
+	}
+	err = b.Client.prepare(req)
+	if err != nil {
+		return nil, err
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		resp, err := b.Client.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return resp, nil
+	}
+	panic("unreachable")
+}
+
+// Exists checks whether an object exists in a bucket using a HEAD request.
+func (b *Bucket) Exists(path string) (exists bool, err error) {
+	req := &request{
+		method: "HEAD",
+		bucket: b.Name,
+		path:   path,
+	}
+	err = b.Client.prepare(req)
+	if err != nil {
+		return
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		resp, err := b.Client.run(req, nil)
+
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+
+		if err != nil {
+			// We can treat a 403 or 404 as non-existence
+			if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
+				return false, nil
+			}
+			return false, err
+		}
+
+		if resp.StatusCode/100 == 2 {
+			exists = true
+		}
+		if resp.Body != nil {
+			resp.Body.Close()
+		}
+		return exists, err
+	}
+	return false, fmt.Errorf("OSS Currently Unreachable")
+}
+
+// Head HEADs an object in the bucket and returns the HTTP response.
+func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) {
+	req := &request{
+		method:  "HEAD",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	err := b.Client.prepare(req)
+	if err != nil {
+		return nil, err
+	}
+
+	for attempt := attempts.Start(); attempt.Next(); {
+		resp, err := b.Client.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return resp, err
+	}
+	return nil, fmt.Errorf("OSS Currently Unreachable")
+}
+
+// Put inserts an object into the bucket.
+func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
+	body := bytes.NewBuffer(data)
+	return b.PutReader(path, body, int64(len(data)), contType, perm, options)
+}
+
+// PutCopy copies the object at source into bucket b under the key path.
+func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
+	headers := make(http.Header)
+
+	headers.Set("x-oss-acl", string(perm))
+	headers.Set("x-oss-copy-source", source)
+
+	options.addHeaders(headers)
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	resp := &CopyObjectResult{}
+	err := b.Client.query(req, resp)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// PutReader inserts an object into the bucket by consuming data
+// from r until EOF.
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
+	headers := make(http.Header)
+	headers.Set("Content-Length", strconv.FormatInt(length, 10))
+	headers.Set("Content-Type", contType)
+	headers.Set("x-oss-acl", string(perm))
+
+	options.addHeaders(headers)
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+		payload: r,
+	}
+	return b.Client.query(req, nil)
+}
+
+// PutFile creates or updates an object in the bucket with the contents of file.
+func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error {
+	var contentType string
+	if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 {
+		contentType = DefaultContentType
+	} else {
+		if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" {
+			contentType = DefaultContentType
+		} else {
+			contentType = mimeType
+		}
+	}
+	stats, err := file.Stat()
+	if err != nil {
+		log.Printf("Unable to read stats of file %s: %v", file.Name(), err)
+		return err
+	}
+
+	return b.PutReader(path, file, stats.Size(), contentType, perm, options)
+}
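+
+// Illustrative usage sketch for PutFile (the local path and object key are
+// placeholders):
+//
+//	f, err := os.Open("/tmp/report.pdf")
+//	if err == nil {
+//		defer f.Close()
+//		err = bucket.PutFile("docs/report.pdf", f, Private, Options{})
+//	}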
+
+// addHeaders adds o's specified fields to headers
+func (o Options) addHeaders(headers http.Header) {
+	if o.ServerSideEncryption {
+		headers.Set("x-oss-server-side-encryption", "AES256")
+	}
+	if len(o.ContentEncoding) != 0 {
+		headers.Set("Content-Encoding", o.ContentEncoding)
+	}
+	if len(o.CacheControl) != 0 {
+		headers.Set("Cache-Control", o.CacheControl)
+	}
+	if len(o.ContentMD5) != 0 {
+		headers.Set("Content-MD5", o.ContentMD5)
+	}
+	if len(o.ContentDisposition) != 0 {
+		headers.Set("Content-Disposition", o.ContentDisposition)
+	}
+
+	for k, v := range o.Meta {
+		for _, mv := range v {
+			headers.Add("x-oss-meta-"+k, mv)
+		}
+	}
+}
+
+// addHeaders adds o's specified fields to headers
+func (o CopyOptions) addHeaders(headers http.Header) {
+	if len(o.MetadataDirective) != 0 {
+		headers.Set("x-oss-metadata-directive", o.MetadataDirective)
+	}
+	if len(o.CopySourceOptions) != 0 {
+		headers.Set("x-oss-copy-source-range", o.CopySourceOptions)
+	}
+	if o.Headers != nil {
+		for k, v := range o.Headers {
+			newSlice := make([]string, len(v))
+			copy(newSlice, v)
+			headers[k] = newSlice
+		}
+	}
+}
+
+func makeXMLBuffer(doc []byte) *bytes.Buffer {
+	buf := new(bytes.Buffer)
+	buf.WriteString(xml.Header)
+	buf.Write(doc)
+	return buf
+}
+
+type IndexDocument struct {
+	Suffix string `xml:"Suffix"`
+}
+
+type ErrorDocument struct {
+	Key string `xml:"Key"`
+}
+
+type RoutingRule struct {
+	ConditionKeyPrefixEquals     string `xml:"Condition>KeyPrefixEquals"`
+	RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
+	RedirectReplaceKeyWith       string `xml:"Redirect>ReplaceKeyWith,omitempty"`
+}
+
+type RedirectAllRequestsTo struct {
+	HostName string `xml:"HostName"`
+	Protocol string `xml:"Protocol,omitempty"`
+}
+
+type WebsiteConfiguration struct {
+	XMLName               xml.Name               `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"`
+	IndexDocument         *IndexDocument         `xml:"IndexDocument,omitempty"`
+	ErrorDocument         *ErrorDocument         `xml:"ErrorDocument,omitempty"`
+	RoutingRules          *[]RoutingRule         `xml:"RoutingRules>RoutingRule,omitempty"`
+	RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
+}
+
+// PutBucketWebsite configures a bucket as a website.
+func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
+	doc, err := xml.Marshal(configuration)
+	if err != nil {
+		return err
+	}
+
+	buf := makeXMLBuffer(doc)
+
+	return b.PutBucketSubresource("website", buf, int64(buf.Len()))
+}
+
+func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
+	headers := make(http.Header)
+	headers.Set("Content-Length", strconv.FormatInt(length, 10))
+
+	req := &request{
+		path:    "/",
+		method:  "PUT",
+		bucket:  b.Name,
+		headers: headers,
+		payload: r,
+		params:  url.Values{subresource: {""}},
+	}
+
+	return b.Client.query(req, nil)
+}
+
+// Del removes an object from the bucket.
+func (b *Bucket) Del(path string) error {
+	req := &request{
+		method: "DELETE",
+		bucket: b.Name,
+		path:   path,
+	}
+	return b.Client.query(req, nil)
+}
+
+type Delete struct {
+	Quiet   bool     `xml:"Quiet,omitempty"`
+	Objects []Object `xml:"Object"`
+}
+
+type Object struct {
+	Key       string `xml:"Key"`
+	VersionId string `xml:"VersionId,omitempty"`
+}
+
+// DelMulti removes up to 1000 objects from the bucket.
+func (b *Bucket) DelMulti(objects Delete) error {
+	doc, err := xml.Marshal(objects)
+	if err != nil {
+		return err
+	}
+
+	buf := makeXMLBuffer(doc)
+	digest := md5.New()
+	size, err := digest.Write(buf.Bytes())
+	if err != nil {
+		return err
+	}
+
+	headers := make(http.Header)
+	headers.Set("Content-Length", strconv.FormatInt(int64(size), 10))
+	headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil)))
+	headers.Set("Content-Type", "text/xml")
+
+	req := &request{
+		path:    "/",
+		method:  "POST",
+		params:  url.Values{"delete": {""}},
+		bucket:  b.Name,
+		headers: headers,
+		payload: buf,
+	}
+
+	return b.Client.query(req, nil)
+}
+
+// The ListResp type holds the results of a List bucket operation.
+type ListResp struct {
+	Name      string
+	Prefix    string
+	Delimiter string
+	Marker    string
+	MaxKeys   int
+	// IsTruncated is true if the results have been truncated because
+	// there are more keys and prefixes than can fit in MaxKeys.
+	// N.B. this is the opposite sense to that documented (incorrectly) in
+	// http://goo.gl/YjQTc
+	IsTruncated    bool
+	Contents       []Key
+	CommonPrefixes []string `xml:">Prefix"`
+	// if IsTruncated is true, pass NextMarker as marker argument to List()
+	// to get the next set of keys
+	NextMarker string
+}
+
+// The Key type represents an item stored in a bucket.
+type Key struct {
+	Key          string
+	LastModified string
+	Type         string
+	Size         int64
+	// ETag gives the hex-encoded MD5 sum of the contents,
+	// surrounded with double-quotes.
+	ETag         string
+	StorageClass string
+	Owner        Owner
+}
+
+// List returns information about objects in a bucket.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix.
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+// The marker parameter specifies the key to start with when listing objects
+// in a bucket. OSS lists objects in alphabetical order and
+// will return keys alphabetically greater than the marker.
+//
+// The max parameter specifies how many keys + common prefixes to return in
+// the response. The default is 1000.
+//
+// For example, given these keys in a bucket:
+//
+//     index.html
+//     index2.html
+//     photos/2006/January/sample.jpg
+//     photos/2006/February/sample2.jpg
+//     photos/2006/February/sample3.jpg
+//     photos/2006/February/sample4.jpg
+//
+// Listing this bucket with delimiter set to "/" would yield the
+// following result:
+//
+//     &ListResp{
+//         Name:      "sample-bucket",
+//         MaxKeys:   1000,
+//         Delimiter: "/",
+//         Contents:  []Key{
+//             {Key: "index.html"}, {Key: "index2.html"},
+//         },
+//         CommonPrefixes: []string{
+//             "photos/",
+//         },
+//     }
+//
+// Listing the same bucket with delimiter set to "/" and prefix set to
+// "photos/2006/" would yield the following result:
+//
+//     &ListResp{
+//         Name:      "sample-bucket",
+//         MaxKeys:   1000,
+//         Delimiter: "/",
+//         Prefix:    "photos/2006/",
+//         CommonPrefixes: []string{
+//             "photos/2006/February/",
+//             "photos/2006/January/",
+//         },
+//     }
+//
+func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
+	params := make(url.Values)
+	params.Set("prefix", prefix)
+	params.Set("delimiter", delim)
+	params.Set("marker", marker)
+	if max != 0 {
+		params.Set("max-keys", strconv.FormatInt(int64(max), 10))
+	}
+	req := &request{
+		bucket: b.Name,
+		params: params,
+	}
+	result = &ListResp{}
+	for attempt := attempts.Start(); attempt.Next(); {
+		err = b.Client.query(req, result)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	// if NextMarker is not returned, it should be set to the name of last key,
+	// so let's do it so that each caller doesn't have to
+	if result.IsTruncated && result.NextMarker == "" {
+		n := len(result.Contents)
+		if n > 0 {
+			result.NextMarker = result.Contents[n-1].Key
+		}
+	}
+	return result, nil
+}
+
+//// The VersionsResp type holds the results of a list bucket Versions operation.
+//type VersionsResp struct {
+//	Name            string
+//	Prefix          string
+//	KeyMarker       string
+//	VersionIdMarker string
+//	MaxKeys         int
+//	Delimiter       string
+//	IsTruncated     bool
+//	Versions        []Version `xml:"Version"`
+//	CommonPrefixes  []string  `xml:">Prefix"`
+//}
+
+//// The Version type represents an object version stored in an bucket.
+//type Version struct {
+//	Key          string
+//	VersionId    string
+//	IsLatest     bool
+//	LastModified string
+//	// ETag gives the hex-encoded MD5 sum of the contents,
+//	// surrounded with double-quotes.
+//	ETag         string
+//	Size         int64
+//	Owner        Owner
+//	StorageClass string
+//}
+
+//func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
+//	params := url.Values{}
+//	params.Set("versions", "")
+//	params.Set("prefix", prefix)
+//	params.Set("delimiter", delim)
+
+//	if len(versionIdMarker) != 0 {
+//		params["version-id-marker"] = []string{versionIdMarker}
+//	}
+//	if len(keyMarker) != 0 {
+//		params["key-marker"] = []string{keyMarker}
+//	}
+
+//	if max != 0 {
+//		params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+//	}
+//	req := &request{
+//		bucket: b.Name,
+//		params: params,
+//	}
+//	result = &VersionsResp{}
+//	for attempt := attempts.Start(); attempt.Next(); {
+//		err = b.Client.query(req, result)
+//		if !shouldRetry(err) {
+//			break
+//		}
+//	}
+//	if err != nil {
+//		return nil, err
+//	}
+//	return result, nil
+//}
+
+type GetLocationResp struct {
+	Location string `xml:",innerxml"`
+}
+
+func (b *Bucket) Location() (string, error) {
+	params := make(url.Values)
+	params.Set("location", "")
+	r, err := b.GetWithParams("/", params)
+
+	if err != nil {
+		return "", err
+	}
+
+	// Parse the XML response.
+	var resp GetLocationResp
+	if err = xml.Unmarshal(r, &resp); err != nil {
+		return "", err
+	}
+
+	if resp.Location == "" {
+		return string(Hangzhou), nil
+	}
+	return resp.Location, nil
+}
+
+func (b *Bucket) Path(path string) string {
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return "/" + b.Name + path
+}
+
+// URL returns a non-signed URL that allows retrieving the
+// object at path. It only works if the object is publicly
+// readable (see SignedURL).
+func (b *Bucket) URL(path string) string {
+	req := &request{
+		bucket: b.Name,
+		path:   path,
+	}
+	err := b.Client.prepare(req)
+	if err != nil {
+		panic(err)
+	}
+	u, err := req.url()
+	if err != nil {
+		panic(err)
+	}
+	u.RawQuery = ""
+	return u.String()
+}
+
+// SignedURL returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
+func (b *Bucket) SignedURL(path string, expires time.Time) string {
+	return b.SignedURLWithArgs(path, expires, nil, nil)
+}
+
+// SignedURLWithArgs returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
+func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string {
+	return b.SignedURLWithMethod("GET", path, expires, params, headers)
+}
+
+// SignedURLWithMethod returns a signed URL that allows anyone holding the URL
+// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires.
+func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string {
+	var uv = url.Values{}
+
+	if params != nil {
+		uv = params
+	}
+
+	uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10))
+	uv.Set("OSSAccessKeyId", b.AccessKeyId)
+
+	req := &request{
+		method:  method,
+		bucket:  b.Name,
+		path:    path,
+		params:  uv,
+		headers: headers,
+	}
+	err := b.Client.prepare(req)
+	if err != nil {
+		panic(err)
+	}
+	u, err := req.url()
+	if err != nil {
+		panic(err)
+	}
+
+	return u.String()
+}
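+
+// Illustrative usage sketch (the object key is a placeholder): a URL that
+// stays valid for 15 minutes and can be handed to any unauthenticated
+// HTTP client.
+//
+//	url := bucket.SignedURL("hello.txt", time.Now().Add(15*time.Minute))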
+
+// UploadSignedURL returns a signed URL that allows anyone holding the URL
+// to upload the object at path. The signature is valid until expires.
+// contentType is a MIME type string like image/png.
+// name is the resource name in OSS terminology, like images/ali.png (excluding the bucket name itself).
+func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string {
+	//TODO TESTING
+	expireDate := expires.Unix()
+	if method != "POST" {
+		method = "PUT"
+	}
+
+	tokenData := ""
+
+	stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name)
+	secretKey := b.AccessKeySecret
+	accessId := b.AccessKeyId
+	mac := hmac.New(sha1.New, []byte(secretKey))
+	mac.Write([]byte(stringToSign))
+	macsum := mac.Sum(nil)
+	signature := base64.StdEncoding.EncodeToString(macsum)
+	signature = strings.TrimSpace(signature)
+
+	signedurl, err := url.Parse(b.Region.GetEndpoint(b.Internal, b.Name, b.Secure))
+	if err != nil {
+		log.Println("ERROR signing url for OSS upload", err)
+		return ""
+	}
+	signedurl.Path = name
+	params := url.Values{}
+	params.Add("OSSAccessKeyId", accessId)
+	params.Add("Expires", strconv.FormatInt(expireDate, 10))
+	params.Add("Signature", signature)
+
+	signedurl.RawQuery = params.Encode()
+	return signedurl.String()
+}
+
+// PostFormArgsEx returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit.
+// Additional conditions can be specified with conds.
+func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) {
+	conditions := []string{}
+	fields = map[string]string{
+		"AWSAccessKeyId": b.AccessKeyId,
+		"key":            path,
+	}
+
+	if conds != nil {
+		conditions = append(conditions, conds...)
+	}
+
+	conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
+	conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
+	if redirect != "" {
+		conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
+		fields["success_action_redirect"] = redirect
+	}
+
+	vExpiration := expires.Format("2006-01-02T15:04:05Z")
+	vConditions := strings.Join(conditions, ",")
+	policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
+	policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
+	fields["policy"] = policy64
+
+	signer := hmac.New(sha1.New, []byte(b.AccessKeySecret))
+	signer.Write([]byte(policy64))
+	fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
+
+	action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name)
+	return
+}
+
+// PostFormArgs returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit
+func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
+	return b.PostFormArgsEx(path, expires, redirect, nil)
+}
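+
+// Illustrative usage sketch (key and expiry are placeholders): the returned
+// action and fields map onto a multipart/form-data POST; the uploading
+// client supplies the file field itself.
+//
+//	action, fields := bucket.PostFormArgs("uploads/photo.jpg", time.Now().Add(time.Hour), "")
+//	// Render an HTML form posting to action, with one hidden input per fields entry.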
+
+type request struct {
+	method   string
+	bucket   string
+	path     string
+	params   url.Values
+	headers  http.Header
+	baseurl  string
+	payload  io.Reader
+	prepared bool
+}
+
+func (req *request) url() (*url.URL, error) {
+	u, err := url.Parse(req.baseurl)
+	if err != nil {
+		return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err)
+	}
+	u.RawQuery = req.params.Encode()
+	u.Path = req.path
+	return u, nil
+}
+
+// query prepares and runs the req request.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (client *Client) query(req *request, resp interface{}) error {
+	err := client.prepare(req)
+	if err != nil {
+		return err
+	}
+	r, err := client.run(req, resp)
+	if r != nil && r.Body != nil {
+		r.Body.Close()
+	}
+	return err
+}
+
+// setBaseURL sets req.baseurl from the bucket name and the region endpoint.
+func (client *Client) setBaseURL(req *request) error {
+	req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure)
+
+	return nil
+}
+
+// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls.
+//
+// Some commands including:
+//      GET Bucket acl              http://goo.gl/aoXflF
+//      GET Bucket cors             http://goo.gl/UlmBdx
+//      GET Bucket lifecycle        http://goo.gl/8Fme7M
+//      GET Bucket policy           http://goo.gl/ClXIo3
+//      GET Bucket location         http://goo.gl/5lh8RD
+//      GET Bucket Logging          http://goo.gl/sZ5ckF
+//      GET Bucket notification     http://goo.gl/qSSZKD
+//      GET Bucket tagging          http://goo.gl/QRvxnM
+// require the first character after the bucket name in the path to be a literal '?' and
+// not the escaped hex representation '%3F'.
+func partiallyEscapedPath(path string) string {
+	pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/")
+	if len(pathEscapedAndSplit) >= 3 {
+		if len(pathEscapedAndSplit[2]) >= 3 {
+			// Check for the one "?" that should not be escaped.
+			if pathEscapedAndSplit[2][0:3] == "%3F" {
+				pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:]
+			}
+		}
+	}
+	return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1)
+}
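+
+// For example, partiallyEscapedPath("/mybucket/?acl") returns "/mybucket/?acl"
+// (the "?" survives as a literal), whereas a full escape would yield
+// "/mybucket/%3Facl" and break such subresource requests.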
+
+// prepare sets up req to be delivered to OSS.
+func (client *Client) prepare(req *request) error {
+	// Copy params and headers so retries can mutate them without affecting the originals.
+	headers := copyHeader(req.headers)
+	params := make(url.Values)
+
+	for k, v := range req.params {
+		params[k] = v
+	}
+
+	req.params = params
+	req.headers = headers
+
+	if !req.prepared {
+		req.prepared = true
+		if req.method == "" {
+			req.method = "GET"
+		}
+
+		if !strings.HasPrefix(req.path, "/") {
+			req.path = "/" + req.path
+		}
+
+		err := client.setBaseURL(req)
+		if err != nil {
+			return err
+		}
+	}
+
+	req.headers.Set("Date", util.GetGMTime())
+	client.signRequest(req)
+
+	return nil
+}
+
+// setupHttpRequest prepares an *http.Request for doHttpRequest.
+func (client *Client) setupHttpRequest(req *request) (*http.Request, error) {
+	// Copy so that signing the http request will not mutate it
+
+	u, err := req.url()
+	if err != nil {
+		return nil, err
+	}
+	u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path))
+
+	hreq := http.Request{
+		URL:        u,
+		Method:     req.method,
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Close:      true,
+		Header:     req.headers,
+		Form:       req.params,
+	}
+
+	contentLength := req.headers.Get("Content-Length")
+
+	if contentLength != "" {
+		hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64)
+		req.headers.Del("Content-Length")
+	}
+
+	if req.payload != nil {
+		hreq.Body = ioutil.NopCloser(req.payload)
+	}
+
+	return &hreq, nil
+}
+
+// doHttpRequest sends hreq and returns the http response from the server.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) {
+	c := http.Client{
+		Transport: &http.Transport{
+			Dial: func(netw, addr string) (c net.Conn, err error) {
+				deadline := time.Now().Add(client.ReadTimeout)
+				if client.ConnectTimeout > 0 {
+					c, err = net.DialTimeout(netw, addr, client.ConnectTimeout)
+				} else {
+					c, err = net.Dial(netw, addr)
+				}
+				if err != nil {
+					return
+				}
+				if client.ReadTimeout > 0 {
+					err = c.SetDeadline(deadline)
+				}
+				return
+			},
+			Proxy: http.ProxyFromEnvironment,
+		},
+	}
+
+	hresp, err := c.Do(hreq)
+	if err != nil {
+		return nil, err
+	}
+	if client.debug {
+		log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode)
+		contentType := hresp.Header.Get("Content-Type")
+		if contentType == "application/xml" || contentType == "text/xml" {
+			dump, _ := httputil.DumpResponse(hresp, true)
+			log.Printf("} -> %s\n", dump)
+		} else {
+			log.Printf("Response Content-Type: %s\n", contentType)
+		}
+	}
+	if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
+		return nil, client.buildError(hresp)
+	}
+	if resp != nil {
+		err = xml.NewDecoder(hresp.Body).Decode(resp)
+		hresp.Body.Close()
+
+		if client.debug {
+			log.Printf("aliyungo.oss> decoded xml into %#v", resp)
+		}
+
+	}
+	return hresp, err
+}
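+
+// Illustrative note: per-request dial and read deadlines come from the
+// Client fields, e.g.
+//
+//	client.ConnectTimeout = 5 * time.Second
+//	client.ReadTimeout = 30 * time.Second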
+
+// run sends req and returns the http response from the server.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (client *Client) run(req *request, resp interface{}) (*http.Response, error) {
+	if client.debug {
+		log.Printf("Running OSS request: %#v", req)
+	}
+
+	hreq, err := client.setupHttpRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return client.doHttpRequest(hreq, resp)
+}
+
+// Error represents an error in an operation with OSS.
+type Error struct {
+	StatusCode int    // HTTP status code (200, 403, ...)
+	Code       string // OSS error code ("UnsupportedOperation", ...)
+	Message    string // The human-oriented error message
+	BucketName string
+	RequestId  string
+	HostId     string
+}
+
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func (client *Client) buildError(r *http.Response) error {
+	if client.debug {
+		log.Printf("got error (status code %v)", r.StatusCode)
+		data, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			log.Printf("\tread error: %v", err)
+		} else {
+			log.Printf("\tdata:\n%s\n\n", data)
+		}
+		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+	}
+
+	err := Error{}
+	// TODO return error if Unmarshal fails?
+	xml.NewDecoder(r.Body).Decode(&err)
+	r.Body.Close()
+	err.StatusCode = r.StatusCode
+	if err.Message == "" {
+		err.Message = r.Status
+	}
+	if client.debug {
+		log.Printf("err: %#v\n", err)
+	}
+	return &err
+}
+
+func shouldRetry(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err {
+	case io.ErrUnexpectedEOF, io.EOF:
+		return true
+	}
+	switch e := err.(type) {
+	case *net.DNSError:
+		return true
+	case *net.OpError:
+		switch e.Op {
+		case "read", "write":
+			return true
+		}
+	case *url.Error:
+		// url.Error can be returned either by net/url if a URL cannot be
+		// parsed, or by net/http if the response is closed before the headers
+		// are received or parsed correctly. In the latter case, e.Op is set to
+		// the HTTP method name with the first letter uppercased. We don't want
+		// to retry on POST operations, since those are not idempotent; all the
+		// other ones should be safe to retry.
+		switch e.Op {
+		case "Get", "Put", "Delete", "Head":
+			return shouldRetry(e.Err)
+		default:
+			return false
+		}
+	case *Error:
+		switch e.Code {
+		case "InternalError", "NoSuchUpload", "NoSuchBucket":
+			return true
+		}
+	}
+	return false
+}
+
+func hasCode(err error, code string) bool {
+	e, ok := err.(*Error)
+	return ok && e.Code == code
+}
+
+func copyHeader(header http.Header) (newHeader http.Header) {
+	newHeader = make(http.Header)
+	for k, v := range header {
+		newSlice := make([]string, len(v))
+		copy(newSlice, v)
+		newHeader[k] = newSlice
+	}
+	return
+}
+
+type AccessControlPolicy struct {
+	Owner  Owner
+	Grants []string `xml:"AccessControlList>Grant"`
+}
+
+// ACL returns ACL of bucket
+func (b *Bucket) ACL() (result *AccessControlPolicy, err error) {
+
+	params := make(url.Values)
+	params.Set("acl", "")
+
+	r, err := b.GetWithParams("/", params)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the XML response.
+	var resp AccessControlPolicy
+	if err = xml.Unmarshal(r, &resp); err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go
new file mode 100644
index 00000000..13bc8768
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go
@@ -0,0 +1,211 @@
+package oss_test
+
+import (
+	"bytes"
+	"io/ioutil"
+	//"net/http"
+	"testing"
+	"time"
+
+	"github.com/denverdino/aliyungo/oss"
+)
+
+var (
+	// If you test on ECS, you can set the internal param to true.
+	client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false)
+)
+
+func TestCreateBucket(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	err := b.PutBucket(oss.Private)
+	if err != nil {
+		t.Errorf("Failed for PutBucket: %v", err)
+	}
+	t.Log("Wait a while for bucket creation ...")
+	time.Sleep(10 * time.Second)
+}
+
+func TestHead(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	_, err := b.Head("name", nil)
+
+	if err == nil {
+		t.Errorf("Failed for Head: %v", err)
+	}
+}
+
+func TestPutObject(t *testing.T) {
+	const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\""
+
+	b := client.Bucket(TestBucket)
+	err := b.Put("name", []byte("content"), "content-type", oss.Private, oss.Options{ContentDisposition: DISPOSITION})
+	if err != nil {
+		t.Errorf("Failed for Put: %v", err)
+	}
+}
+
+func TestGet(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	data, err := b.Get("name")
+
+	if err != nil || string(data) != "content" {
+		t.Errorf("Failed for Get: %v", err)
+	}
+}
+
+func TestURL(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	url := b.URL("name")
+
+	t.Log("URL: ", url)
+	//	/c.Assert(req.URL.Path, check.Equals, "/denverdino_test/name")
+}
+
+func TestGetReader(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	rc, err := b.GetReader("name")
+	if err != nil {
+		t.Fatalf("Failed for GetReader: %v", err)
+	}
+	data, err := ioutil.ReadAll(rc)
+	rc.Close()
+	if err != nil || string(data) != "content" {
+		t.Errorf("Failed for ReadAll: %v", err)
+	}
+}
+
+func aTestGetNotFound(t *testing.T) {
+
+	b := client.Bucket("non-existent-bucket")
+	_, err := b.Get("non-existent")
+	if err == nil {
+		t.Fatalf("Failed for TestGetNotFound: %v", err)
+	}
+	ossErr, ok := err.(*oss.Error)
+	if !ok || ossErr.StatusCode != 404 || ossErr.BucketName != "non-existent-bucket" {
+		t.Errorf("Failed for TestGetNotFound: %v", err)
+	}
+
+}
+
+func TestPutCopy(t *testing.T) {
+	b := client.Bucket(TestBucket)
+	t.Log("Source: ", b.Path("name"))
+	res, err := b.PutCopy("newname", oss.Private, oss.CopyOptions{},
+		b.Path("name"))
+	if err == nil {
+		t.Logf("Copy result: %v", res)
+	} else {
+		t.Errorf("Failed for PutCopy: %v", err)
+	}
+}
+
+func TestList(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	data, err := b.List("n", "", "", 0)
+	if err != nil || len(data.Contents) != 2 {
+		t.Errorf("Failed for List: %v", err)
+	} else {
+		t.Logf("Contents = %++v", data)
+	}
+}
+
+func TestListWithDelimiter(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	data, err := b.List("photos/2006/", "/", "some-marker", 1000)
+	if err != nil || len(data.Contents) != 0 {
+		t.Errorf("Failed for List: %v", err)
+	} else {
+		t.Logf("Contents = %++v", data)
+	}
+
+}
+
+func TestPutReader(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	buf := bytes.NewBufferString("content")
+	err := b.PutReader("name", buf, int64(buf.Len()), "content-type", oss.Private, oss.Options{})
+	if err != nil {
+		t.Errorf("Failed for PutReader: %v", err)
+	}
+	TestGetReader(t)
+}
+
+func TestExists(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	result, err := b.Exists("name")
+	if err != nil || result != true {
+		t.Errorf("Failed for Exists: %v", err)
+	}
+}
+
+func TestLocation(t *testing.T) {
+	b := client.Bucket(TestBucket)
+	result, err := b.Location()
+
+	if err != nil || result != string(TestRegion) {
+		t.Errorf("Failed for Location: %v %s", err, result)
+	}
+}
+
+func TestACL(t *testing.T) {
+	b := client.Bucket(TestBucket)
+	result, err := b.ACL()
+
+	if err != nil {
+		t.Errorf("Failed for ACL: %v", err)
+	} else {
+		t.Logf("AccessControlPolicy: %++v", result)
+	}
+}
+
+func TestDelObject(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	err := b.Del("name")
+	if err != nil {
+		t.Errorf("Failed for Del: %v", err)
+	}
+}
+
+func TestDelMultiObjects(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	objects := []oss.Object{oss.Object{Key: "newname"}}
+	err := b.DelMulti(oss.Delete{
+		Quiet:   false,
+		Objects: objects,
+	})
+	if err != nil {
+		t.Errorf("Failed for DelMulti: %v", err)
+	}
+}
+
+func TestGetService(t *testing.T) {
+	bucketList, err := client.GetService()
+	if err != nil {
+		t.Errorf("Unable to get service: %v", err)
+	} else {
+		t.Logf("GetService: %++v", bucketList)
+	}
+}
+
+func TestDelBucket(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+	err := b.DelBucket()
+	if err != nil {
+		t.Errorf("Failed for DelBucket: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go
new file mode 100644
index 00000000..7c0d2549
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go
@@ -0,0 +1,14 @@
+package oss_test
+
+import (
+	"github.com/denverdino/aliyungo/oss"
+)
+
+// Modify with your Access Key Id and Access Key Secret.
+const (
+	TestAccessKeyId     = "MY_ACCESS_KEY_ID"
+	TestAccessKeySecret = "MY_ACCESS_KEY_SECRET"
+	TestIAmRich         = false
+	TestRegion          = oss.Beijing
+	TestBucket          = "denverdino"
+)
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go
new file mode 100644
index 00000000..ebdb0477
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go
@@ -0,0 +1,23 @@
+package oss
+
+import (
+	"github.com/denverdino/aliyungo/util"
+)
+
+var originalStrategy = attempts
+
+func SetAttemptStrategy(s *util.AttemptStrategy) {
+	if s == nil {
+		attempts = originalStrategy
+	} else {
+		attempts = *s
+	}
+}
+
+func SetListPartsMax(n int) {
+	listPartsMax = n
+}
+
+func SetListMultiMax(n int) {
+	listMultiMax = n
+}
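+
+// Illustrative usage sketch (the values below are placeholders): tests can
+// tighten the retry policy and later restore the default:
+//
+//	SetAttemptStrategy(&util.AttemptStrategy{Min: 2, Total: time.Second, Delay: 100 * time.Millisecond})
+//	defer SetAttemptStrategy(nil)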
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go
new file mode 100644
index 00000000..454d53b7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go
@@ -0,0 +1,460 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"errors"
+	"io"
+	//"log"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Multi represents an unfinished multipart upload.
+//
+// Multipart uploads allow sending big objects in smaller chunks.
+// After all parts have been sent, the upload must be explicitly
+// completed by calling Complete with the list of parts.
+type Multi struct {
+	Bucket   *Bucket
+	Key      string
+	UploadId string
+}
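+
+// Illustrative workflow sketch (key, content type, and part contents are
+// placeholders; each part except the last must be at least 5MB):
+//
+//	multi, _ := bucket.InitMulti("big-object", "application/octet-stream", Private, Options{})
+//	part, _ := multi.PutPart(1, strings.NewReader("...part contents..."))
+//	err := multi.Complete([]Part{part})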
+
+// That's the default. Here just for testing.
+var listMultiMax = 1000
+
+type listMultiResp struct {
+	NextKeyMarker      string
+	NextUploadIdMarker string
+	IsTruncated        bool
+	Upload             []Multi
+	CommonPrefixes     []string `xml:"CommonPrefixes>Prefix"`
+}
+
+// ListMulti returns the list of unfinished multipart uploads in b.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix. You can use prefixes to separate a bucket into different
+// groupings of keys (to get the feeling of folders, for example).
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
+	params := make(url.Values)
+	params.Set("uploads", "")
+	params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10))
+	params.Set("prefix", prefix)
+	params.Set("delimiter", delim)
+
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method: "GET",
+			bucket: b.Name,
+			params: params,
+		}
+		var resp listMultiResp
+		err := b.Client.query(req, &resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		for i := range resp.Upload {
+			multi := &resp.Upload[i]
+			multi.Bucket = b
+			multis = append(multis, multi)
+		}
+		prefixes = append(prefixes, resp.CommonPrefixes...)
+		if !resp.IsTruncated {
+			return multis, prefixes, nil
+		}
+		params.Set("key-marker", resp.NextKeyMarker)
+		params.Set("upload-id-marker", resp.NextUploadIdMarker)
+		attempt = attempts.Start() // Last request worked.
+	}
+	panic("unreachable")
+}
+
+// Multi returns a multipart upload handler for the provided key
+// inside b. If a multipart upload exists for key, it is returned,
+// otherwise a new multipart upload is initiated with contType and perm.
+func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) {
+	multis, _, err := b.ListMulti(key, "")
+	if err != nil && !hasCode(err, "NoSuchUpload") {
+		return nil, err
+	}
+	for _, m := range multis {
+		if m.Key == key {
+			return m, nil
+		}
+	}
+	return b.InitMulti(key, contType, perm, options)
+}
+
+// InitMulti initializes a new multipart upload at the provided
+// key inside b and returns a value for manipulating it.
+//
+func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
+	headers := make(http.Header)
+	headers.Set("Content-Length", "0")
+	headers.Set("Content-Type", contType)
+	headers.Set("x-oss-acl", string(perm))
+
+	options.addHeaders(headers)
+	params := make(url.Values)
+	params.Set("uploads", "")
+	req := &request{
+		method:  "POST",
+		bucket:  b.Name,
+		path:    key,
+		headers: headers,
+		params:  params,
+	}
+	var err error
+	var resp struct {
+		UploadId string `xml:"UploadId"`
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		err = b.Client.query(req, &resp)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
+}
+
+func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
+	headers := make(http.Header)
+	headers.Set("x-oss-copy-source", source)
+
+	options.addHeaders(headers)
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+	params.Set("partNumber", strconv.FormatInt(int64(n), 10))
+
+	sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/"))
+	//log.Println("source: ", source)
+	//log.Println("sourceBucket: ", sourceBucket.Name)
+	//log.Println("HEAD: ", strings.Split(source, "/")[2])
+	sourceMeta, err := sourceBucket.Head(strings.Split(source, "/")[2], nil)
+	if err != nil {
+		return nil, Part{}, err
+	}
+
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method:  "PUT",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			headers: headers,
+			params:  params,
+		}
+		resp := &CopyObjectResult{}
+		err = m.Bucket.Client.query(req, resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, Part{}, err
+		}
+		if resp.ETag == "" {
+			return nil, Part{}, errors.New("part upload succeeded with no ETag")
+		}
+		return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil
+	}
+	panic("unreachable")
+}
+
+// PutPart sends part n of the multipart upload, reading all the content from r.
+// Each part, except for the last one, must be at least 5MB in size.
+//
+func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
+	partSize, _, md5b64, err := seekerInfo(r)
+	if err != nil {
+		return Part{}, err
+	}
+	return m.putPart(n, r, partSize, md5b64)
+}
+
+func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
+	headers := make(http.Header)
+	headers.Set("Content-Length", strconv.FormatInt(partSize, 10))
+	headers.Set("Content-MD5", md5b64)
+
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+	params.Set("partNumber", strconv.FormatInt(int64(n), 10))
+
+	for attempt := attempts.Start(); attempt.Next(); {
+		_, err := r.Seek(0, 0)
+		if err != nil {
+			return Part{}, err
+		}
+		req := &request{
+			method:  "PUT",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			headers: headers,
+			params:  params,
+			payload: r,
+		}
+		err = m.Bucket.Client.prepare(req)
+		if err != nil {
+			return Part{}, err
+		}
+		resp, err := m.Bucket.Client.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return Part{}, err
+		}
+		etag := resp.Header.Get("ETag")
+		if etag == "" {
+			return Part{}, errors.New("part upload succeeded with no ETag")
+		}
+		return Part{n, etag, partSize}, nil
+	}
+	panic("unreachable")
+}
+
+func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
+	_, err = r.Seek(0, 0)
+	if err != nil {
+		return 0, "", "", err
+	}
+	digest := md5.New()
+	size, err = io.Copy(digest, r)
+	if err != nil {
+		return 0, "", "", err
+	}
+	sum := digest.Sum(nil)
+	md5hex = hex.EncodeToString(sum)
+	md5b64 = base64.StdEncoding.EncodeToString(sum)
+	return size, md5hex, md5b64, nil
+}
+
+type Part struct {
+	N    int `xml:"PartNumber"`
+	ETag string
+	Size int64
+}
+
+type partSlice []Part
+
+func (s partSlice) Len() int           { return len(s) }
+func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
+func (s partSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+type listPartsResp struct {
+	NextPartNumberMarker string
+	IsTruncated          bool
+	Part                 []Part
+}
+
+// That's the default. Here just for testing.
+var listPartsMax = 1000
+
+// ListParts is kept for backward compatibility. See the documentation for ListPartsFull.
+func (m *Multi) ListParts() ([]Part, error) {
+	return m.ListPartsFull(0, listPartsMax)
+}
+
+// ListPartsFull returns the list of previously uploaded parts in m,
+// ordered by part number (Only parts with higher part numbers than
+// partNumberMarker will be listed). Only up to maxParts parts will be
+// returned.
+//
+func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
+	if maxParts > listPartsMax {
+		maxParts = listPartsMax
+	}
+
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+	params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10))
+	params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10))
+
+	var parts partSlice
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method: "GET",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		var resp listPartsResp
+		err := m.Bucket.Client.query(req, &resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, resp.Part...)
+		if !resp.IsTruncated {
+			sort.Sort(parts)
+			return parts, nil
+		}
+		params.Set("part-number-marker", resp.NextPartNumberMarker)
+		attempt = attempts.Start() // Last request worked.
+	}
+	panic("unreachable")
+}
+
+type ReaderAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
+
+// PutAll sends all of r via a multipart upload with parts no larger
+// than partSize bytes, which must be set to at least 5MB.
+// Parts previously uploaded are either reused if their checksum
+// and size match the new part, or otherwise overwritten with the
+// new content.
+// PutAll returns all the parts of m (reused or not).
+func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
+	old, err := m.ListParts()
+	if err != nil && !hasCode(err, "NoSuchUpload") {
+		return nil, err
+	}
+	reuse := 0   // Index of next old part to consider reusing.
+	current := 1 // Part number of latest good part handled.
+	totalSize, err := r.Seek(0, 2)
+	if err != nil {
+		return nil, err
+	}
+	first := true // Must send at least one empty part if the file is empty.
+	var result []Part
+NextSection:
+	for offset := int64(0); offset < totalSize || first; offset += partSize {
+		first = false
+		if offset+partSize > totalSize {
+			partSize = totalSize - offset
+		}
+		section := io.NewSectionReader(r, offset, partSize)
+		_, md5hex, md5b64, err := seekerInfo(section)
+		if err != nil {
+			return nil, err
+		}
+		for reuse < len(old) && old[reuse].N <= current {
+			// Looks like this part was already sent.
+			part := &old[reuse]
+			etag := `"` + md5hex + `"`
+			if part.N == current && part.Size == partSize && part.ETag == etag {
+				// Checksum matches. Reuse the old part.
+				result = append(result, *part)
+				current++
+				continue NextSection
+			}
+			reuse++
+		}
+
+		// Part wasn't found or doesn't match. Send it.
+		part, err := m.putPart(current, section, partSize, md5b64)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, part)
+		current++
+	}
+	return result, nil
+}
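+
+// Illustrative usage sketch (file path and part size are placeholders;
+// *os.File satisfies ReaderAtSeeker):
+//
+//	f, _ := os.Open("/tmp/big.bin")
+//	parts, err := multi.PutAll(f, 5*1024*1024)
+//	if err == nil {
+//		err = multi.Complete(parts)
+//	}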
+
+type completeUpload struct {
+	XMLName xml.Name      `xml:"CompleteMultipartUpload"`
+	Parts   completeParts `xml:"Part"`
+}
+
+type completePart struct {
+	PartNumber int
+	ETag       string
+}
+
+type completeParts []completePart
+
+func (p completeParts) Len() int           { return len(p) }
+func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
+func (p completeParts) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// Complete assembles the given previously uploaded parts into the
+// final object. This operation may take several minutes.
+//
+func (m *Multi) Complete(parts []Part) error {
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+
+	c := completeUpload{}
+	for _, p := range parts {
+		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
+	}
+	sort.Sort(c.Parts)
+	data, err := xml.Marshal(&c)
+	if err != nil {
+		return err
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method:  "POST",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			params:  params,
+			payload: bytes.NewReader(data),
+		}
+		err := m.Bucket.Client.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
+
+// Abort deletes an unfinished multipart upload and any previously
+// uploaded parts for it.
+//
+// After a multipart upload is aborted, no additional parts can be
+// uploaded using it. However, if any part uploads are currently in
+// progress, those part uploads might or might not succeed. As a result,
+// it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// NOTE: If the described scenario happens to you, please report back to
+// the aliyungo authors with details. In the future such retrying should be
+// handled internally, but it's not clear what happens precisely (Is an
+// error returned? Is the issue completely undetectable?).
+//
+func (m *Multi) Abort() error {
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method: "DELETE",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		err := m.Bucket.Client.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go
new file mode 100644
index 00000000..6ecd63be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go
@@ -0,0 +1,161 @@
+package oss_test
+
+import (
+	//"encoding/xml"
+	"github.com/denverdino/aliyungo/oss"
+	"testing"
+	//"io"
+	//"io/ioutil"
+	"strings"
+)
+
+func TestCreateBucketMulti(t *testing.T) {
+	TestCreateBucket(t)
+}
+
+func TestInitMulti(t *testing.T) {
+	b := client.Bucket(TestBucket)
+
+	metadata := make(map[string][]string)
+	metadata["key1"] = []string{"value1"}
+	metadata["key2"] = []string{"value2"}
+	options := oss.Options{
+		ServerSideEncryption: true,
+		Meta:                 metadata,
+		ContentEncoding:      "text/utf8",
+		CacheControl:         "no-cache",
+		ContentMD5:           "0000000000000000",
+	}
+
+	multi, err := b.InitMulti("multi", "text/plain", oss.Private, options)
+	if err != nil {
+		t.Errorf("Failed for InitMulti: %v", err)
+	} else {
+		t.Logf("InitMulti result: %++v", multi)
+	}
+}
+
+func TestMultiReturnOld(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Errorf("Failed for Multi: %v", err)
+	} else {
+		t.Logf("Multi result: %++v", multi)
+	}
+
+}
+
+func TestPutPart(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Fatalf("Failed for Multi: %v", err)
+	}
+
+	part, err := multi.PutPart(1, strings.NewReader("<part 1>"))
+	if err != nil {
+		t.Errorf("Failed for PutPart: %v", err)
+	} else {
+		t.Logf("PutPart result: %++v", part)
+	}
+
+}
+func TestPutPartCopy(t *testing.T) {
+
+	TestPutObject(t)
+
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Fatalf("Failed for Multi: %v", err)
+	}
+
+	res, part, err := multi.PutPartCopy(2, oss.CopyOptions{}, b.Path("name"))
+	if err != nil {
+		t.Errorf("Failed for PutPartCopy: %v", err)
+	} else {
+		t.Logf("PutPartCopy result: %++v %++v", part, res)
+	}
+	TestDelObject(t)
+}
+
+func TestListParts(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Fatalf("Failed for Multi: %v", err)
+	}
+
+	parts, err := multi.ListParts()
+	if err != nil {
+		t.Errorf("Failed for ListParts: %v", err)
+	} else {
+		t.Logf("ListParts result: %++v", parts)
+	}
+}
+
+func TestListMulti(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	multis, prefixes, err := b.ListMulti("", "/")
+	if err != nil {
+		t.Errorf("Failed for ListMulti: %v", err)
+	} else {
+		t.Logf("ListMulti result : %++v  %++v", multis, prefixes)
+	}
+}
+
+func TestMultiAbort(t *testing.T) {
+
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Fatalf("Failed for Multi: %v", err)
+	}
+
+	err = multi.Abort()
+	if err != nil {
+		t.Errorf("Failed for Abort: %v", err)
+	}
+
+}
+
+func TestPutAll(t *testing.T) {
+	TestInitMulti(t)
+	b := client.Bucket(TestBucket)
+
+	multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{})
+	if err != nil {
+		t.Fatalf("Failed for Multi: %v", err)
+	}
+
+	// Must send at least one part, so that completing it will work.
+	parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5)
+	if err != nil {
+		t.Errorf("Failed for PutAll: %v", err)
+	} else {
+		t.Logf("PutAll result: %++v", parts)
+	}
+	//	err = multi.Complete(parts)
+	//	if err != nil {
+	//		t.Errorf("Failed for Complete: %v", err)
+	//	}
+	err = multi.Abort()
+	if err != nil {
+		t.Errorf("Failed for Abort: %v", err)
+	}
+}
+
+func TestCleanUp(t *testing.T) {
+	TestDelBucket(t)
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go
new file mode 100644
index 00000000..2bba7382
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go
@@ -0,0 +1,53 @@
+package oss
+
+import (
+	"fmt"
+)
+
+// Region represents an OSS region
+type Region string
+
+// Constants for the known OSS regions
+const (
+	Hangzhou      = Region("oss-cn-hangzhou")
+	Qingdao       = Region("oss-cn-qingdao")
+	Beijing       = Region("oss-cn-beijing")
+	Hongkong      = Region("oss-cn-hongkong")
+	Shenzhen      = Region("oss-cn-shenzhen")
+	USWest1       = Region("oss-us-west-1")
+	DefaultRegion = Hangzhou
+)
+
+// GetEndpoint returns the endpoint of the region, internal or internet-facing
+func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string {
+	if internal {
+		return r.GetInternalEndpoint(bucket, secure)
+	}
+	return r.GetInternetEndpoint(bucket, secure)
+}
+
+func getProtocol(secure bool) string {
+	protocol := "http"
+	if secure {
+		protocol = "https"
+	}
+	return protocol
+}
+
+// GetInternetEndpoint returns the internet-facing endpoint of the region
+func (r Region) GetInternetEndpoint(bucket string, secure bool) string {
+	protocol := getProtocol(secure)
+	if bucket == "" {
+		return fmt.Sprintf("%s://oss.aliyuncs.com", protocol)
+	}
+	return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r))
+}
+
+// GetInternalEndpoint returns the internal (intranet) endpoint of the region
+func (r Region) GetInternalEndpoint(bucket string, secure bool) string {
+	protocol := getProtocol(secure)
+	if bucket == "" {
+		return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol)
+	}
+	return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r))
+}
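+
+// Illustrative sketch of the endpoints these helpers produce; the
+// bucket name "my-bucket" is a placeholder:
+//
+//	Hangzhou.GetEndpoint(false, "my-bucket", true)
+//	// "https://my-bucket.oss-cn-hangzhou.aliyuncs.com"
+//	Hangzhou.GetEndpoint(true, "my-bucket", false)
+//	// "http://my-bucket.oss-cn-hangzhou-internal.aliyuncs.com"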
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go
new file mode 100644
index 00000000..a261644a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go
@@ -0,0 +1,105 @@
+package oss
+
+import (
+	"github.com/denverdino/aliyungo/util"
+	//"log"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+const HeaderOSSPrefix = "x-oss-"
+
+var ossParamsToSign = map[string]bool{
+	"acl":                          true,
+	"delete":                       true,
+	"location":                     true,
+	"logging":                      true,
+	"notification":                 true,
+	"partNumber":                   true,
+	"policy":                       true,
+	"requestPayment":               true,
+	"torrent":                      true,
+	"uploadId":                     true,
+	"uploads":                      true,
+	"versionId":                    true,
+	"versioning":                   true,
+	"versions":                     true,
+	"response-content-type":        true,
+	"response-content-language":    true,
+	"response-expires":             true,
+	"response-cache-control":       true,
+	"response-content-disposition": true,
+	"response-content-encoding":    true,
+}
+
+func (client *Client) signRequest(request *request) {
+	query := request.params
+
+	urlSignature := query.Get("OSSAccessKeyId") != ""
+
+	headers := request.headers
+	contentMd5 := headers.Get("Content-Md5")
+	contentType := headers.Get("Content-Type")
+	date := ""
+	if urlSignature {
+		date = query.Get("Expires")
+	} else {
+		date = headers.Get("Date")
+	}
+
+	resource := request.path
+	if request.bucket != "" {
+		resource = "/" + request.bucket + request.path
+	}
+	params := make(url.Values)
+	for k, v := range query {
+		if ossParamsToSign[k] {
+			params[k] = v
+		}
+	}
+
+	if len(params) > 0 {
+		resource = resource + "?" + util.Encode(params)
+	}
+
+	canonicalizedResource := resource
+
+	_, canonicalizedHeader := canonicalizeHeader(headers)
+
+	stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource
+
+	//log.Println("stringToSign: ", stringToSign)
+	signature := util.CreateSignature(stringToSign, client.AccessKeySecret)
+
+	if query.Get("OSSAccessKeyId") != "" {
+		query.Set("Signature", signature)
+	} else {
+		headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature)
+	}
+}
+
+// canonicalizeHeader lower-cases the x-oss-* header keys (it has to
+// break the http.Header abstraction to do so) and builds the
+// canonicalized header string used for signing.
+func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) {
+	var canonicalizedHeaders []string
+	newHeaders = http.Header{}
+
+	for k, v := range headers {
+		if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) {
+			newHeaders[lower] = v
+			canonicalizedHeaders = append(canonicalizedHeaders, lower)
+		} else {
+			newHeaders[k] = v
+		}
+	}
+
+	sort.Strings(canonicalizedHeaders)
+
+	var canonicalizedHeader string
+
+	for _, k := range canonicalizedHeaders {
+		canonicalizedHeader += k + ":" + headers.Get(k) + "\n"
+	}
+	return newHeaders, canonicalizedHeader
+}
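+
+// For reference, the string-to-sign assembled in signRequest has the
+// following shape; every value below is a placeholder:
+//
+//	PUT\n
+//	<content-md5>\n
+//	text/plain\n
+//	<date>\n
+//	x-oss-meta-key1:value1\n
+//	/my-bucket/my-key?uploadId=<id>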
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go
new file mode 100644
index 00000000..e71ed19f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go
@@ -0,0 +1,74 @@
+package util
+
+import (
+	"time"
+)
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully. This is an internal type used by the
+// implementation of other goamz packages.
+type AttemptStrategy struct {
+	Total time.Duration // total duration of attempt.
+	Delay time.Duration // interval between each try in the burst.
+	Min   int           // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+	strategy AttemptStrategy
+	last     time.Time
+	end      time.Time
+	force    bool
+	count    int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+	now := time.Now()
+	return &Attempt{
+		strategy: s,
+		last:     now,
+		end:      now.Add(s.Total),
+		force:    true,
+	}
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+func (a *Attempt) Next() bool {
+	now := time.Now()
+	sleep := a.nextSleep(now)
+	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+		return false
+	}
+	a.force = false
+	if sleep > 0 && a.count > 0 {
+		time.Sleep(sleep)
+		now = time.Now()
+	}
+	a.count++
+	a.last = now
+	return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+	sleep := a.strategy.Delay - now.Sub(a.last)
+	if sleep < 0 {
+		return 0
+	}
+	return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+	if a.force || a.strategy.Min > a.count {
+		return true
+	}
+	now := time.Now()
+	if now.Add(a.nextSleep(now)).Before(a.end) {
+		a.force = true
+		return true
+	}
+	return false
+}
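+
+// Illustrative retry loop built on this type, mirroring how the oss
+// package drives it; doSomething and shouldRetry are placeholders for
+// the caller's operation and error classifier:
+//
+//	strategy := AttemptStrategy{Min: 5, Total: 5 * time.Second, Delay: 200 * time.Millisecond}
+//	for attempt := strategy.Start(); attempt.Next(); {
+//		err := doSomething()
+//		if shouldRetry(err) && attempt.HasNext() {
+//			continue
+//		}
+//		return err
+//	}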
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go
new file mode 100644
index 00000000..50e9be7a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go
@@ -0,0 +1,90 @@
+package util
+
+import (
+	"testing"
+	"time"
+)
+
+func TestAttemptTiming(t *testing.T) {
+	testAttempt := AttemptStrategy{
+		Total: 0.25e9,
+		Delay: 0.1e9,
+	}
+	want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
+	got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
+	t0 := time.Now()
+	for a := testAttempt.Start(); a.Next(); {
+		got = append(got, time.Since(t0))
+	}
+	got = append(got, time.Since(t0))
+	if len(got) != len(want) {
+		t.Fatalf("Failed!")
+	}
+	const margin = 0.01e9
+	for i, got := range got {
+		lo := want[i] - margin
+		hi := want[i] + margin
+		if got < lo || got > hi {
+			t.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
+		}
+	}
+}
+
+func TestAttemptNextHasNext(t *testing.T) {
+	a := AttemptStrategy{}.Start()
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if a.Next() {
+		t.Fatalf("Failed!")
+	}
+
+	a = AttemptStrategy{}.Start()
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if a.HasNext() {
+		t.Fatalf("Failed!")
+	}
+	if a.Next() {
+		t.Fatalf("Failed!")
+	}
+	a = AttemptStrategy{Total: 2e8}.Start()
+
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if !a.HasNext() {
+		t.Fatalf("Failed!")
+	}
+	time.Sleep(2e8)
+
+	if !a.HasNext() {
+		t.Fatalf("Failed!")
+	}
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if a.Next() {
+		t.Fatalf("Failed!")
+	}
+
+	a = AttemptStrategy{Total: 1e8, Min: 2}.Start()
+	time.Sleep(1e8)
+
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if !a.HasNext() {
+		t.Fatalf("Failed!")
+	}
+	if !a.Next() {
+		t.Fatalf("Failed!")
+	}
+	if a.HasNext() {
+		t.Fatalf("Failed!")
+	}
+	if a.Next() {
+		t.Fatalf("Failed!")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go
new file mode 100644
index 00000000..54a63569
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go
@@ -0,0 +1,113 @@
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// ConvertToQueryValues converts a struct to url.Values
+func ConvertToQueryValues(ifc interface{}) url.Values {
+	values := url.Values{}
+	SetQueryValues(ifc, &values)
+	return values
+}
+
+// SetQueryValues encodes a struct's fields into existing url.Values following ECS encoding rules
+func SetQueryValues(ifc interface{}, values *url.Values) {
+	setQueryValues(ifc, values, "")
+}
+
+func setQueryValues(i interface{}, values *url.Values, prefix string) {
+	elem := reflect.ValueOf(i)
+	if elem.Kind() == reflect.Ptr {
+		elem = elem.Elem()
+	}
+	elemType := elem.Type()
+	for i := 0; i < elem.NumField(); i++ {
+		fieldName := elemType.Field(i).Name
+		field := elem.Field(i)
+		// TODO Use Tag for validation
+		// tag := typ.Field(i).Tag.Get("tagname")
+		kind := field.Kind()
+		if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() {
+			continue
+		}
+		if kind == reflect.Ptr {
+			field = field.Elem()
+		}
+		var value string
+		switch field.Interface().(type) {
+		case int, int8, int16, int32, int64:
+			i := field.Int()
+			if i != 0 {
+				value = strconv.FormatInt(i, 10)
+			}
+		case uint, uint8, uint16, uint32, uint64:
+			i := field.Uint()
+			if i != 0 {
+				value = strconv.FormatUint(i, 10)
+			}
+		case float32:
+			value = strconv.FormatFloat(field.Float(), 'f', 4, 32)
+		case float64:
+			value = strconv.FormatFloat(field.Float(), 'f', 4, 64)
+		case []byte:
+			value = string(field.Bytes())
+		case bool:
+			value = strconv.FormatBool(field.Bool())
+		case string:
+			value = field.String()
+		case []string:
+			l := field.Len()
+			if l > 0 {
+				strArray := make([]string, l)
+				for i := 0; i < l; i++ {
+					strArray[i] = field.Index(i).String()
+				}
+				bytes, err := json.Marshal(strArray)
+				if err == nil {
+					value = string(bytes)
+				} else {
+					log.Printf("Failed to convert JSON: %v", err)
+				}
+			}
+		case time.Time:
+			t := field.Interface().(time.Time)
+			value = GetISO8601TimeStamp(t)
+
+		default:
+		if kind == reflect.Slice { // slice of structs
+				l := field.Len()
+				for j := 0; j < l; j++ {
+					prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1))
+					ifc := field.Index(j).Interface()
+					log.Printf("%s : %v", prefixName, ifc)
+					if ifc != nil {
+						setQueryValues(ifc, values, prefixName)
+					}
+				}
+			} else {
+				ifc := field.Interface()
+				if ifc != nil {
+					SetQueryValues(ifc, values)
+					continue
+				}
+			}
+		}
+		if value != "" {
+			name := elemType.Field(i).Tag.Get("ArgName")
+			if name == "" {
+				name = fieldName
+			}
+			if prefix != "" {
+				name = prefix + name
+			}
+			values.Set(name, value)
+		}
+	}
+}
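+
+// Illustrative sketch of the resulting encoding; the struct and its
+// values are placeholders (encoding_test.go exercises a complete case):
+//
+//	type Args struct {
+//		RegionId string
+//		PageSize int `ArgName:"page-size"`
+//	}
+//	ConvertToQueryValues(&Args{RegionId: "cn-hangzhou", PageSize: 10}).Encode()
+//	// "RegionId=cn-hangzhou&page-size=10"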
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go
new file mode 100644
index 00000000..a49eb215
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go
@@ -0,0 +1,46 @@
+package util
+
+import (
+	"testing"
+	"time"
+)
+
+type SubStruct struct {
+	A string
+	B int
+}
+
+type TestStruct struct {
+	Format      string
+	Version     string
+	AccessKeyId string
+	Timestamp   time.Time
+	Empty       string
+	IntValue    int      `ArgName:"int-value"`
+	BoolPtr     *bool    `ArgName:"bool-ptr"`
+	IntPtr      *int     `ArgName:"int-ptr"`
+	StringArray []string `ArgName:"str-array"`
+	StructArray []SubStruct
+}
+
+func TestConvertToQueryValues(t *testing.T) {
+	boolValue := true
+	request := TestStruct{
+		Format:      "JSON",
+		Version:     "1.0",
+		Timestamp:   time.Date(2015, time.Month(5), 26, 1, 2, 3, 4, time.UTC),
+		IntValue:    10,
+		BoolPtr:     &boolValue,
+		StringArray: []string{"abc", "xyz"},
+		StructArray: []SubStruct{
+			SubStruct{A: "a", B: 1},
+			SubStruct{A: "x", B: 2},
+		},
+	}
+	result := ConvertToQueryValues(&request).Encode()
+	const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D"
+	if result != expectedResult {
+		t.Error("Incorrect encoding: ", result)
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go
new file mode 100644
index 00000000..121d6e62
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go
@@ -0,0 +1,62 @@
+package util
+
+import (
+	"fmt"
+	"time"
+)
+
+// GetISO8601TimeStamp returns the timestamp string in ISO8601 format
+func GetISO8601TimeStamp(ts time.Time) string {
+	t := ts.UTC()
+	return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
+}
+
+const formatISO8601 = "2006-01-02T15:04:05Z"
+const jsonFormatISO8601 = `"` + formatISO8601 + `"`
+
+// An ISO6801Time represents a time in ISO8601 format
+type ISO6801Time time.Time
+
+// New constructs a new ISO6801Time instance from an existing
+// time.Time instance, truncating the nanosecond field to 0 and
+// setting the time zone to UTC.
+func New(t time.Time) ISO6801Time {
+	return ISO6801Time(time.Date(
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		0,
+		time.UTC,
+	))
+}
+
+// IsDefault reports whether the time is the zero value
+func (it *ISO6801Time) IsDefault() bool {
+	return *it == ISO6801Time{}
+}
+
+// MarshalJSON serializes the ISO6801Time into a JSON string
+func (it ISO6801Time) MarshalJSON() ([]byte, error) {
+	return []byte(time.Time(it).Format(jsonFormatISO8601)), nil
+}
+
+// UnmarshalJSON deserializes the ISO6801Time from a JSON string
+func (it *ISO6801Time) UnmarshalJSON(data []byte) error {
+	if string(data) == "\"\"" {
+		return nil
+	}
+	t, err := time.ParseInLocation(jsonFormatISO8601, string(data), time.UTC)
+	if err == nil {
+		*it = ISO6801Time(t)
+	}
+	return err
+}
+
+// String returns the time using time.Time's default string format
+func (it ISO6801Time) String() string {
+	return time.Time(it).String()
+}
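+
+// Illustrative round trip; the timestamp is a placeholder value:
+//
+//	t := New(time.Date(2015, time.May, 26, 1, 2, 3, 0, time.UTC))
+//	data, _ := json.Marshal(t) // `"2015-05-26T01:02:03Z"`
+//	var back ISO6801Time
+//	_ = json.Unmarshal(data, &back) // back == t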
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go
new file mode 100644
index 00000000..284a23c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go
@@ -0,0 +1,50 @@
+package util
+
+import (
+	"encoding/json"
+	"testing"
+	"time"
+)
+
+func TestISO8601Time(t *testing.T) {
+	now := New(time.Now().UTC())
+
+	data, err := json.Marshal(now)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = time.Parse(`"`+formatISO8601+`"`, string(data))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var now2 ISO6801Time
+	err = json.Unmarshal(data, &now2)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if now != now2 {
+		t.Fatalf("Time %s does not equal expected %s", now2, now)
+	}
+
+	if now.String() != now2.String() {
+		t.Fatalf("String format for %s does not equal expected %s", now2, now)
+	}
+
+	type TestTimeStruct struct {
+		A int
+		B *ISO6801Time
+	}
+	var testValue TestTimeStruct
+	err = json.Unmarshal([]byte("{\"A\": 1, \"B\":\"\"}"), &testValue)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%v", testValue)
+	if !testValue.B.IsDefault() {
+		t.Fatal("Invaid Unmarshal result for ISO6801Time from empty value")
+	}
+
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go
new file mode 100644
index 00000000..a00b27c1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go
@@ -0,0 +1,40 @@
+package util
+
+import (
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"net/url"
+	"strings"
+)
+
+// CreateSignature creates a signature for a string following Aliyun's rules
+func CreateSignature(stringToSignature, accessKeySecret string) string {
+	// Sign with HMAC-SHA1
+	hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret))
+	hmacSha1.Write([]byte(stringToSignature))
+	sign := hmacSha1.Sum(nil)
+
+	// Encode to Base64
+	base64Sign := base64.StdEncoding.EncodeToString(sign)
+
+	return base64Sign
+}
+
+func percentReplace(str string) string {
+	str = strings.Replace(str, "+", "%20", -1)
+	str = strings.Replace(str, "*", "%2A", -1)
+	str = strings.Replace(str, "%7E", "~", -1)
+
+	return str
+}
+
+// CreateSignatureForRequest creates a signature for the query string values
+func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string {
+
+	canonicalizedQueryString := percentReplace(values.Encode())
+
+	stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString)
+
+	return CreateSignature(stringToSign, accessKeySecret)
+}
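+
+// Illustrative sketch; the parameters and secret are placeholders:
+//
+//	values := url.Values{}
+//	values.Set("Action", "DescribeRegions")
+//	values.Set("Format", "XML")
+//	signature := CreateSignatureForRequest("GET", &values, "testsecret")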
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go
new file mode 100644
index 00000000..e5c22cca
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go
@@ -0,0 +1,14 @@
+package util
+
+import (
+	"testing"
+)
+
+func TestCreateSignature(t *testing.T) {
+
+	str := "GET&%2F&AccessKeyId%3Dtestid%26Action%3DDescribeRegions%26Format%3DXML%26RegionId%3Dregion1%26SignatureMethod%3DHMAC-SHA1%26SignatureNonce%3DNwDAxvLU6tFE0DVb%26SignatureVersion%3D1.0%26TimeStamp%3D2012-12-26T10%253A33%253A56Z%26Version%3D2014-05-26"
+
+	signature := CreateSignature(str, "testsecret")
+
+	t.Log(signature)
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go
new file mode 100644
index 00000000..f2826684
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go
@@ -0,0 +1,54 @@
+package util
+
+import (
+	"bytes"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"time"
+)
+
+// CreateRandomString creates a random string
+func CreateRandomString() string {
+
+	rand.Seed(time.Now().UnixNano())
+	randInt := rand.Int63()
+	randStr := strconv.FormatInt(randInt, 36)
+
+	return randStr
+}
+
+// Encode encodes the values into ``URL encoded'' form
+// ("acl&bar=baz&foo=quux") sorted by key, omitting "=" for
+// value-less keys.
+func Encode(v url.Values) string {
+	if v == nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	keys := make([]string, 0, len(v))
+	for k := range v {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		vs := v[k]
+		prefix := url.QueryEscape(k)
+		for _, v := range vs {
+			if buf.Len() > 0 {
+				buf.WriteByte('&')
+			}
+			buf.WriteString(prefix)
+			if v != "" {
+				buf.WriteString("=")
+				buf.WriteString(url.QueryEscape(v))
+			}
+		}
+	}
+	return buf.String()
+}
+
+// GetGMTime returns the current UTC time formatted per RFC 1123 for
+// use in HTTP Date headers.
+func GetGMTime() string {
+	return time.Now().UTC().Format(http.TimeFormat)
+}
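+
+// Illustrative sketch of Encode's handling of value-less keys, which
+// is where it differs from url.Values.Encode; the values are
+// placeholders:
+//
+//	v := url.Values{}
+//	v.Set("acl", "")
+//	v.Set("foo", "quux")
+//	Encode(v) // "acl&foo=quux"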