From 9e4975d8ffa1672b8f1698c423d4dcd13bc787a4 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Mon, 11 May 2015 23:26:51 +0800 Subject: [PATCH 01/20] Support OSS driver Signed-off-by: Li Yi --- cmd/registry/main.go | 1 + docs/storage-drivers/oss.md | 23 + registry/storage/driver/oss/oss.go | 813 ++++++++++++++++++++++++ registry/storage/driver/oss/oss_test.go | 152 +++++ 4 files changed, 989 insertions(+) create mode 100755 docs/storage-drivers/oss.md create mode 100755 registry/storage/driver/oss/oss.go create mode 100755 registry/storage/driver/oss/oss_test.go diff --git a/cmd/registry/main.go b/cmd/registry/main.go index 7950a448c..e766f7c38 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -27,6 +27,7 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" + _ "github.com/docker/distribution/registry/storage/driver/oss" _ "github.com/docker/distribution/registry/storage/driver/s3" _ "github.com/docker/distribution/registry/storage/driver/swift" "github.com/docker/distribution/uuid" diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md new file mode 100755 index 000000000..a8acd2775 --- /dev/null +++ b/docs/storage-drivers/oss.md @@ -0,0 +1,23 @@ + + +# OSS storage driver + +An implementation of the `storagedriver.StorageDriver` interface which uses Aliyun OSS for object storage. + +## Parameters + +`accesskeyid`: Your access key ID. + +`accesskeysecret`: Your access key secret. + +`region`: The name of the aws region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region + +`bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). 
+ +`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. + +`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go new file mode 100755 index 000000000..f85c75411 --- /dev/null +++ b/registry/storage/driver/oss/oss.go @@ -0,0 +1,813 @@ +// Package oss provides a storagedriver.StorageDriver implementation to +// store blobs in Aliyun OSS cloud storage. +// +// This package leverages the denverdino/aliyungo client library for interfacing with +// oss. +// +// Because OSS is a key, value store the Stat call does not support last modification +// time for directories (directories are an abstraction for key, value stores) +// +// Keep in mind that OSS guarantees only eventual consistency, so do not assume +// that a successful write will mean immediate access to the data written (although +// in most regions a new object put has guaranteed read after write). The only true +// guarantee is that once you call Stat and receive a certain file size, that much of +// the file is already accessible. 
+package oss + +import ( + "bytes" + "fmt" + "github.com/docker/distribution/context" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/Sirupsen/logrus" + "github.com/denverdino/aliyungo/oss" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/base" + "github.com/docker/distribution/registry/storage/driver/factory" +) + +const driverName = "oss" + +// minChunkSize defines the minimum multipart upload chunk size +// OSS API requires multipart upload chunks to be at least 5MB +const minChunkSize = 5 << 20 + +const defaultChunkSize = 2 * minChunkSize + +// listMax is the largest amount of objects you can request from OSS in a list call +const listMax = 1000 + +//DriverParameters A struct that encapsulates all of the driver parameters after all values have been set +type DriverParameters struct { + AccessKeyId string + AccessKeySecret string + Bucket string + Region oss.Region + Internal bool + Encrypt bool + Secure bool + ChunkSize int64 + RootDirectory string +} + +func init() { + factory.Register(driverName, &ossDriverFactory{}) +} + +// ossDriverFactory implements the factory.StorageDriverFactory interface +type ossDriverFactory struct{} + +func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { + return FromParameters(parameters) +} + +type driver struct { + Client *oss.Client + Bucket *oss.Bucket + ChunkSize int64 + Encrypt bool + RootDirectory string + + pool sync.Pool // pool []byte buffers used for WriteStream + zeros []byte // shared, zero-valued buffer used for WriteStream +} + +type baseEmbed struct { + base.Base +} + +// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS +// Objects are stored at absolute keys in the provided bucket. 
+type Driver struct { + baseEmbed +} + +// FromParameters constructs a new Driver with a given parameters map +// Required parameters: +// - accesskey +// - secretkey +// - region +// - bucket +// - encrypt +func FromParameters(parameters map[string]interface{}) (*Driver, error) { + // Providing no values for these is valid in case the user is authenticating + // with an IAM on an ec2 instance (in which case the instance credentials will + // be summoned when GetAuth is called) + accessKey, ok := parameters["accesskeyid"] + if !ok { + accessKey = "" + } + secretKey, ok := parameters["accesskeysecret"] + if !ok { + secretKey = "" + } + + regionName, ok := parameters["region"] + if !ok || fmt.Sprint(regionName) == "" { + return nil, fmt.Errorf("No region parameter provided") + } + + bucket, ok := parameters["bucket"] + if !ok || fmt.Sprint(bucket) == "" { + return nil, fmt.Errorf("No bucket parameter provided") + } + + internalBool := false + internal, ok := parameters["internal"] + if ok { + internalBool, ok = internal.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + encryptBool := false + encrypt, ok := parameters["encrypt"] + if ok { + encryptBool, ok = encrypt.(bool) + if !ok { + return nil, fmt.Errorf("The encrypt parameter should be a boolean") + } + } + + secureBool := true + secure, ok := parameters["secure"] + if ok { + secureBool, ok = secure.(bool) + if !ok { + return nil, fmt.Errorf("The secure parameter should be a boolean") + } + } + + chunkSize := int64(defaultChunkSize) + chunkSizeParam, ok := parameters["chunksize"] + if ok { + switch v := chunkSizeParam.(type) { + case string: + vv, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) + } + chunkSize = vv + case int64: + chunkSize = v + case int, uint, int32, uint32, uint64: + chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() + 
default: + return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) + } + + if chunkSize < minChunkSize { + return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) + } + } + + rootDirectory, ok := parameters["rootdirectory"] + if !ok { + rootDirectory = "" + } + + params := DriverParameters{ + AccessKeyId: fmt.Sprint(accessKey), + AccessKeySecret: fmt.Sprint(secretKey), + Bucket: fmt.Sprint(bucket), + Region: oss.Region(fmt.Sprint(regionName)), + ChunkSize: chunkSize, + RootDirectory: fmt.Sprint(rootDirectory), + Encrypt: encryptBool, + Secure: secureBool, + Internal: internalBool, + } + + return New(params) +} + +// New constructs a new Driver with the given AWS credentials, region, encryption flag, and +// bucketName +func New(params DriverParameters) (*Driver, error) { + + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyId, params.AccessKeySecret) + bucket := client.Bucket(params.Bucket) + + // Validate that the given credentials have at least read permissions in the + // given bucket scope. + if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { + return nil, err + } + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new OSS client while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? 
+ // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + Client: client, + Bucket: bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + content, err := d.Bucket.Get(d.ossPath(path)) + if err != nil { + return nil, parseError(path, err) + } + return content, nil +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + headers := make(http.Header) + headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") + + resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) + if err != nil { + if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. 
The number +// of successfully READ bytes will be returned, even if an error is +// returned. May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + partNumber := 1 + bytesRead := 0 + var putErrChan chan error + parts := []oss.Part{} + var part oss.Part + done := make(chan struct{}) // stopgap to free up waiting goroutines + + multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) + if err != nil { + return 0, err + } + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. 
+ defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + if multi == nil { + // Parts should be empty if the multi is not initialized + panic("Unreachable") + } else { + if multi.Complete(parts) != nil { + multi.Abort() + } + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the OSS + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying OSS library should handle it, it doesn't seem to + // be part of the shouldRetry function (see denverdino/aliyungo/oss). 
+ defer func() { + select { + case putErrChan <- nil: // for some reason, we do this no matter what. + case <-done: + return // ensure we don't leak the goroutine + } + }() + + if bytesRead <= 0 { + return + } + + var err error + var part oss.Part + + loop: + for retries := 0; retries < 5; retries++ { + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) + if err == nil { + break // success! + } + + // NOTE(stevvooe): This retry code tries to only retry under + // conditions where the OSS package does not. We may add oss + // error codes to the below if we see others bubble up in the + // application. Right now, the most troubling is + // RequestTimeout, which seems to only triggered when a tcp + // connection to OSS slows to a crawl. If the RequestTimeout + // ends up getting added to the OSS library and we don't see + // other errors, this retry loop can be removed. + switch err := err.(type) { + case *oss.Error: + switch err.Code { + case "RequestTimeout": + // allow retries on only this error. + default: + break loop + } + } + + backoff := 100 * time.Millisecond * time.Duration(retries+1) + logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) + time.Sleep(backoff) + } + + if err != nil { + logrus.Errorf("error putting part, aborting: %v", err) + select { + case putErrChan <- err: + case <-done: + return // don't leak the goroutine + } + } + + // parts and partNumber are safe, because this function is the + // only one modifying them and we force it to be executed + // serially. 
+ parts = append(parts, part) + partNumber++ + }(bytesRead, from, buf) + + buf = d.getbuf() // use a new buffer for the next call + return nil + } + + if offset > 0 { + resp, err := d.Bucket.Head(d.ossPath(path), nil) + if err != nil { + if ossErr, ok := err.(*oss.Error); !ok || ossErr.Code != "NoSuchKey" { + return 0, err + } + } + + currentLength := int64(0) + if err == nil { + currentLength = resp.ContentLength + } + + if currentLength >= offset { + if offset < d.ChunkSize { + // chunkSize > currentLength >= offset + if err = fromSmallCurrent(offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // currentLength >= offset >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, + d.Bucket.Name+"/"+d.ossPath(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + } + } else { + // Fills between parameters with 0s but only when to - from <= chunkSize + fromZeroFillSmall := func(from, to int64) error { + bytesRead = 0 + for from+int64(bytesRead) < to { + nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) + bytesRead += nn + if err != nil { + return err + } + } + + return nil + } + + // Fills between parameters with 0s, making new parts + fromZeroFillLarge := func(from, to int64) error { + bytesRead64 := int64(0) + for to-(from+bytesRead64) >= d.ChunkSize { + part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) + if err != nil { + return err + } + bytesRead64 += d.ChunkSize + + parts = append(parts, part) + partNumber++ + } + + return fromZeroFillSmall(0, (to-from)%d.ChunkSize) + } + + // currentLength < offset + if currentLength < d.ChunkSize { + if offset < d.ChunkSize { + // chunkSize > offset > currentLength + if err = fromSmallCurrent(currentLength); 
err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) + if err != nil { + return totalRead, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + _, part, err = multi.PutPartCopy(partNumber, + oss.CopyOptions{}, + d.Bucket.Name+"/"+d.ossPath(path)) + if err != nil { + return 0, err + } + + parts = append(parts, part) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. 
+func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(listResponse.Contents) == 1 { + if listResponse.Contents[0].Key != d.ossPath(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = listResponse.Contents[0].Size + + timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) + if err != nil { + return nil, err + } + fi.ModTime = timestamp + } + } else if len(listResponse.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, path string) ([]string, error) { + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+ // In those cases, there is no root prefix to replace and we must actually add a "/" to all + // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp + prefix := "" + if d.ossPath("") == "" { + prefix = "/" + } + + listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) + if err != nil { + return nil, err + } + + files := []string{} + directories := []string{} + + for { + for _, key := range listResponse.Contents { + files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) + } + + for _, commonPrefix := range listResponse.CommonPrefixes { + directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) + } + + if listResponse.IsTruncated { + listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) + if err != nil { + return nil, err + } + } else { + break + } + } + + return append(files, directories...), nil +} + +// Move moves an object stored at sourcePath to destPath, removing the original +// object. +func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { + logrus.Infof("Move from %s to %s", d.Bucket.Path("/"+d.ossPath(sourcePath)), d.ossPath(destPath)) + /* This is terrible, but aws doesn't have an actual move. */ + _, err := d.Bucket.PutCopy(d.ossPath(destPath), getPermissions(), + oss.CopyOptions{ + //Options: d.getOptions(), + //ContentType: d.getContentType() + }, + d.Bucket.Path(d.ossPath(sourcePath))) + if err != nil { + return parseError(sourcePath, err) + } + + return d.Delete(ctx, sourcePath) +} + +// Delete recursively deletes all objects stored at "path" and its subpaths. 
+func (d *driver) Delete(ctx context.Context, path string) error { + listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil || len(listResponse.Contents) == 0 { + return storagedriver.PathNotFoundError{Path: path} + } + + ossObjects := make([]oss.Object, listMax) + + for len(listResponse.Contents) > 0 { + for index, key := range listResponse.Contents { + ossObjects[index].Key = key.Key + } + + err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) + if err != nil { + return nil + } + + listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) + if err != nil { + return err + } + } + + return nil +} + +// URLFor returns a URL which may be used to retrieve the content stored at the given path. +// May return an UnsupportedMethodErr in certain StorageDriver implementations. +func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod + } + } + + expiresTime := time.Now().Add(20 * time.Minute) + logrus.Infof("expiresTime: %d", expiresTime) + + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresTime = et + } + } + logrus.Infof("expiresTime: %d", expiresTime) + testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) + logrus.Infof("testURL: %s", testURL) + return testURL, nil +} + +func (d *driver) ossPath(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the OSS bucket key for the given storage driver path. 
+func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).ossPath(path) +} + +func parseError(path string, err error) error { + if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func hasCode(err error, code string) bool { + ossErr, ok := err.(*oss.Error) + return ok && ossErr.Code == code +} + +func (d *driver) getOptions() oss.Options { + return oss.Options{ServerSideEncryption: d.Encrypt} +} + +func getPermissions() oss.ACL { + return oss.Private +} + +func (d *driver) getContentType() string { + return "application/octet-stream" +} + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go new file mode 100755 index 000000000..ecfe36e9d --- /dev/null +++ b/registry/storage/driver/oss/oss_test.go @@ -0,0 +1,152 @@ +package oss + +import ( + alioss "github.com/denverdino/aliyungo/oss" + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + "io/ioutil" + //"log" + "os" + "strconv" + "testing" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
+func Test(t *testing.T) { check.TestingT(t) } + +type OSSDriverConstructor func(rootDirectory string) (*Driver, error) + +func init() { + accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") + secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") + bucket := os.Getenv("OSS_BUCKET") + region := os.Getenv("OSS_REGION") + internal := os.Getenv("OSS_INTERNAL") + encrypt := os.Getenv("OSS_ENCRYPT") + secure := os.Getenv("OSS_SECURE") + root, err := ioutil.TempDir("", "driver-") + if err != nil { + panic(err) + } + defer os.Remove(root) + + ossDriverConstructor := func(rootDirectory string) (*Driver, error) { + encryptBool := false + if encrypt != "" { + encryptBool, err = strconv.ParseBool(encrypt) + if err != nil { + return nil, err + } + } + + secureBool := false + if secure != "" { + secureBool, err = strconv.ParseBool(secure) + if err != nil { + return nil, err + } + } + + internalBool := false + if internal != "" { + internalBool, err = strconv.ParseBool(internal) + if err != nil { + return nil, err + } + } + + parameters := DriverParameters{ + accessKey, + secretKey, + bucket, + alioss.Region(region), + internalBool, + encryptBool, + secureBool, + minChunkSize, + rootDirectory, + } + + return New(parameters) + } + + // Skip OSS storage driver tests if environment variable parameters are not provided + skipCheck := func() string { + if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { + return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" + } + return "" + } + + driverConstructor := func() (storagedriver.StorageDriver, error) { + return ossDriverConstructor(root) + } + + testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) + + // ossConstructor := func() (*Driver, error) { + // return ossDriverConstructor(aws.GetRegion(region)) + // } + + RegisterOSSDriverSuite(ossDriverConstructor, skipCheck) + + // testsuites.RegisterIPCSuite(driverName, map[string]string{ 
+ // "accesskey": accessKey, + // "secretkey": secretKey, + // "region": region.Name, + // "bucket": bucket, + // "encrypt": encrypt, + // }, skipCheck) + // } +} + +func RegisterOSSDriverSuite(ossDriverConstructor OSSDriverConstructor, skipCheck testsuites.SkipCheck) { + check.Suite(&OSSDriverSuite{ + Constructor: ossDriverConstructor, + SkipCheck: skipCheck, + }) +} + +type OSSDriverSuite struct { + Constructor OSSDriverConstructor + testsuites.SkipCheck +} + +func (suite *OSSDriverSuite) SetUpSuite(c *check.C) { + if reason := suite.SkipCheck(); reason != "" { + c.Skip(reason) + } +} + +func (suite *OSSDriverSuite) TestEmptyRootList(c *check.C) { + validRoot, err := ioutil.TempDir("", "driver-") + c.Assert(err, check.IsNil) + defer os.Remove(validRoot) + + rootedDriver, err := suite.Constructor(validRoot) + c.Assert(err, check.IsNil) + emptyRootDriver, err := suite.Constructor("") + c.Assert(err, check.IsNil) + slashRootDriver, err := suite.Constructor("/") + c.Assert(err, check.IsNil) + + filename := "/test" + contents := []byte("contents") + ctx := context.Background() + err = rootedDriver.PutContent(ctx, filename, contents) + c.Assert(err, check.IsNil) + defer rootedDriver.Delete(ctx, filename) + + keys, err := emptyRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } + + keys, err = slashRootDriver.List(ctx, "/") + for _, path := range keys { + c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + } +} From b8a276f2db179e3b381f60a11e6f063ed1e1781f Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 12 May 2015 00:06:14 +0800 Subject: [PATCH 02/20] Fix the warning of golint Signed-off-by: Li Yi --- registry/storage/driver/oss/oss.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index f85c75411..598bc55cd 100755 --- a/registry/storage/driver/oss/oss.go +++ 
b/registry/storage/driver/oss/oss.go @@ -47,7 +47,7 @@ const listMax = 1000 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { - AccessKeyId string + AccessKeyID string AccessKeySecret string Bucket string Region oss.Region @@ -176,7 +176,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { } params := DriverParameters{ - AccessKeyId: fmt.Sprint(accessKey), + AccessKeyID: fmt.Sprint(accessKey), AccessKeySecret: fmt.Sprint(secretKey), Bucket: fmt.Sprint(bucket), Region: oss.Region(fmt.Sprint(regionName)), @@ -194,7 +194,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // bucketName func New(params DriverParameters) (*Driver, error) { - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyId, params.AccessKeySecret) + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the From c3b42db014afe2c90dc15053547fe69191c12805 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 12 May 2015 11:56:00 +0800 Subject: [PATCH 03/20] Add the secure access with HTTPS Signed-off-by: Li Yi --- docs/storage-drivers/oss.md | 16 +++++++++++----- registry/storage/driver/oss/oss.go | 4 ++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md index a8acd2775..997490156 100755 --- a/docs/storage-drivers/oss.md +++ b/docs/storage-drivers/oss.md @@ -1,10 +1,10 @@ -# OSS storage driver +# Aliyun OSS storage driver An implementation of the `storagedriver.StorageDriver` interface which uses Aliyun OSS for object storage. @@ -14,10 +14,16 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Aliy `accesskeysecret`: Your access key secret. 
-`region`: The name of the aws region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region +`region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at [http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region](http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region) + +`internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at [http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region](http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region) `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). +`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). + +`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to false if not specified. + `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). 
diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 598bc55cd..9c52d5773 100755 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -125,7 +125,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { if ok { internalBool, ok = internal.(bool) if !ok { - return nil, fmt.Errorf("The encrypt parameter should be a boolean") + return nil, fmt.Errorf("The internal parameter should be a boolean") } } @@ -194,7 +194,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // bucketName func New(params DriverParameters) (*Driver, error) { - client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret) + client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the From 38b23c8dff272897aced0589d249d0f8da51a65b Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 12 May 2015 14:19:05 +0800 Subject: [PATCH 04/20] Update Godeps for Aliyun OSS Signed-off-by: Li Yi --- Godeps/Godeps.json | 8 + .../denverdino/aliyungo/oss/client.go | 1252 +++++++++++++++++ .../denverdino/aliyungo/oss/client_test.go | 211 +++ .../denverdino/aliyungo/oss/config_test.go | 14 + .../denverdino/aliyungo/oss/export.go | 23 + .../denverdino/aliyungo/oss/multi.go | 460 ++++++ .../denverdino/aliyungo/oss/multi_test.go | 161 +++ .../denverdino/aliyungo/oss/regions.go | 53 + .../denverdino/aliyungo/oss/signature.go | 105 ++ .../denverdino/aliyungo/util/attempt.go | 74 + .../denverdino/aliyungo/util/attempt_test.go | 90 ++ .../denverdino/aliyungo/util/encoding.go | 113 ++ .../denverdino/aliyungo/util/encoding_test.go | 46 + .../denverdino/aliyungo/util/iso6801.go | 62 + .../denverdino/aliyungo/util/iso6801_test.go | 50 + .../denverdino/aliyungo/util/signature.go | 40 + 
.../aliyungo/util/signature_test.go | 14 + .../denverdino/aliyungo/util/util.go | 54 + 18 files changed, 2830 insertions(+) create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 174280023..d21f52608 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -44,6 +44,14 @@ "Comment": "1.2.0-66-g6086d79", "Rev": "6086d7927ec35315964d9fea46df6c04e6d697c1" }, + { + "ImportPath": "github.com/denverdino/aliyungo/oss", + "Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb" + }, 
+ { + "ImportPath": "github.com/denverdino/aliyungo/util", + "Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb" + }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", "Comment": "v1.4.1-3932-gb63ec6e", diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go new file mode 100644 index 000000000..8e38ea4ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go @@ -0,0 +1,1252 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/xml" + "fmt" + "github.com/denverdino/aliyungo/util" + "io" + "io/ioutil" + "log" + "mime" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" +) + +const DefaultContentType = "application/octet-stream" + +// The Client type encapsulates operations with an OSS region. +type Client struct { + AccessKeyId string + AccessKeySecret string + Region Region + Internal bool + Secure bool + ConnectTimeout time.Duration + ReadTimeout time.Duration + debug bool +} + +// The Bucket type encapsulates operations with an bucket. +type Bucket struct { + *Client + Name string +} + +// The Owner type represents the owner of the object in an bucket. 
+type Owner struct { + ID string + DisplayName string +} + +// Options struct +// +type Options struct { + ServerSideEncryption bool + Meta map[string][]string + ContentEncoding string + CacheControl string + ContentMD5 string + ContentDisposition string + //Range string + //Expires int +} + +type CopyOptions struct { + Headers http.Header + CopySourceOptions string + MetadataDirective string + //ContentType string +} + +// CopyObjectResult is the output from a Copy request +type CopyObjectResult struct { + ETag string + LastModified string +} + +var attempts = util.AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, +} + +// NewOSSClient creates a new OSS. + +func NewOSSClient(region Region, internal bool, accessKeyId string, accessKeySecret string, secure bool) *Client { + return &Client{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + Region: region, + Internal: internal, + debug: false, + Secure: secure, + } +} + +// SetDebug sets debug mode to log the request/response message +func (client *Client) SetDebug(debug bool) { + client.debug = debug +} + +// Bucket returns a Bucket with the given name. +func (client *Client) Bucket(name string) *Bucket { + name = strings.ToLower(name) + return &Bucket{ + Client: client, + Name: name, + } +} + +type BucketInfo struct { + Name string + CreationDate string +} + +type GetServiceResp struct { + Owner Owner + Buckets []BucketInfo `xml:">Bucket"` +} + +// GetService gets a list of all buckets owned by an account. +func (client *Client) GetService() (*GetServiceResp, error) { + bucket := client.Bucket("") + + r, err := bucket.Get("") + if err != nil { + return nil, err + } + + // Parse the XML response. 
+ var resp GetServiceResp + if err = xml.Unmarshal(r, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +type ACL string + +const ( + Private = ACL("private") + PublicRead = ACL("public-read") + PublicReadWrite = ACL("public-read-write") + AuthenticatedRead = ACL("authenticated-read") + BucketOwnerRead = ACL("bucket-owner-read") + BucketOwnerFull = ACL("bucket-owner-full-control") +) + +var createBucketConfiguration = ` + %s +` + +// locationConstraint returns an io.Reader specifying a LocationConstraint if +// required for the region. +func (client *Client) locationConstraint() io.Reader { + constraint := fmt.Sprintf(createBucketConfiguration, client.Region) + return strings.NewReader(constraint) +} + +// PutBucket creates a new bucket. +func (b *Bucket) PutBucket(perm ACL) error { + headers := make(http.Header) + if perm != "" { + headers.Set("x-oss-acl", string(perm)) + } + req := &request{ + method: "PUT", + bucket: b.Name, + path: "/", + headers: headers, + payload: b.Client.locationConstraint(), + } + return b.Client.query(req, nil) +} + +// DelBucket removes an existing bucket. All objects in the bucket must +// be removed before the bucket itself can be removed. +func (b *Bucket) DelBucket() (err error) { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: "/", + } + for attempt := attempts.Start(); attempt.Next(); { + err = b.Client.query(req, nil) + if !shouldRetry(err) { + break + } + } + return err +} + +// Get retrieves an object from an bucket. +func (b *Bucket) Get(path string) (data []byte, err error) { + body, err := b.GetReader(path) + if err != nil { + return nil, err + } + data, err = ioutil.ReadAll(body) + body.Close() + return data, err +} + +// GetReader retrieves an object from an bucket, +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading. 
+func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { + resp, err := b.GetResponse(path) + if resp != nil { + return resp.Body, err + } + return nil, err +} + +// GetResponse retrieves an object from an bucket, +// returning the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { + return b.GetResponseWithHeaders(path, make(http.Header)) +} + +// GetResponseWithHeaders retrieves an object from an bucket +// Accepts custom headers to be sent as the second parameter +// returning the body of the HTTP response. +// It is the caller's responsibility to call Close on rc when +// finished reading +func (b *Bucket) GetResponseWithHeaders(path string, headers http.Header) (resp *http.Response, err error) { + req := &request{ + bucket: b.Name, + path: path, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Get retrieves an object from an bucket. 
+func (b *Bucket) GetWithParams(path string, params url.Values) (data []byte, err error) { + resp, err := b.GetResponseWithParamsAndHeaders(path, params, nil) + if err != nil { + return nil, err + } + data, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + return data, err +} + +func (b *Bucket) GetResponseWithParamsAndHeaders(path string, params url.Values, headers http.Header) (resp *http.Response, err error) { + req := &request{ + bucket: b.Name, + path: path, + params: params, + headers: headers, + } + err = b.Client.prepare(req) + if err != nil { + return nil, err + } + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, nil + } + panic("unreachable") +} + +// Exists checks whether or not an object exists on an bucket using a HEAD request. +func (b *Bucket) Exists(path string) (exists bool, err error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + } + err = b.Client.prepare(req) + if err != nil { + return + } + for attempt := attempts.Start(); attempt.Next(); { + resp, err := b.Client.run(req, nil) + + if shouldRetry(err) && attempt.HasNext() { + continue + } + + if err != nil { + // We can treat a 403 or 404 as non existance + if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) { + return false, nil + } + return false, err + } + + if resp.StatusCode/100 == 2 { + exists = true + } + if resp.Body != nil { + resp.Body.Close() + } + return exists, err + } + return false, fmt.Errorf("OSS Currently Unreachable") +} + +// Head HEADs an object in the bucket, returns the response with +func (b *Bucket) Head(path string, headers http.Header) (*http.Response, error) { + req := &request{ + method: "HEAD", + bucket: b.Name, + path: path, + headers: headers, + } + err := b.Client.prepare(req) + if err != nil { + return nil, err + } + + for attempt := attempts.Start(); 
attempt.Next(); { + resp, err := b.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + return resp, err + } + return nil, fmt.Errorf("OSS Currently Unreachable") +} + +// Put inserts an object into the bucket. +func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error { + body := bytes.NewBuffer(data) + return b.PutReader(path, body, int64(len(data)), contType, perm, options) +} + +// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key +func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) { + headers := make(http.Header) + + headers.Set("x-oss-acl", string(perm)) + headers.Set("x-oss-copy-source", source) + + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + } + resp := &CopyObjectResult{} + err := b.Client.query(req, resp) + if err != nil { + return resp, err + } + return resp, nil +} + +// PutReader inserts an object into the bucket by consuming data +// from r until EOF. 
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(length, 10)) + headers.Set("Content-Type", contType) + headers.Set("x-oss-acl", string(perm)) + + options.addHeaders(headers) + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.Client.query(req, nil) +} + +// PutFile creates/updates object with file +func (b *Bucket) PutFile(path string, file *os.File, perm ACL, options Options) error { + var contentType string + if dotPos := strings.LastIndex(file.Name(), "."); dotPos == -1 { + contentType = DefaultContentType + } else { + if mimeType := mime.TypeByExtension(file.Name()[dotPos:]); mimeType == "" { + contentType = DefaultContentType + } else { + contentType = mimeType + } + } + stats, err := file.Stat() + if err != nil { + log.Panicf("Unable to read file %s stats.", file.Name()) + return nil + } + + return b.PutReader(path, file, stats.Size(), contentType, perm, options) +} + +// addHeaders adds o's specified fields to headers +func (o Options) addHeaders(headers http.Header) { + if o.ServerSideEncryption { + headers.Set("x-oss-server-side-encryption", "AES256") + } + if len(o.ContentEncoding) != 0 { + headers.Set("Content-Encoding", o.ContentEncoding) + } + if len(o.CacheControl) != 0 { + headers.Set("Cache-Control", o.CacheControl) + } + if len(o.ContentMD5) != 0 { + headers.Set("Content-MD5", o.ContentMD5) + } + if len(o.ContentDisposition) != 0 { + headers.Set("Content-Disposition", o.ContentDisposition) + } + + for k, v := range o.Meta { + for _, mv := range v { + headers.Add("x-oss-meta-"+k, mv) + } + } +} + +// addHeaders adds o's specified fields to headers +func (o CopyOptions) addHeaders(headers http.Header) { + if len(o.MetadataDirective) != 0 { + headers.Set("x-oss-metadata-directive", o.MetadataDirective) + } + if len(o.CopySourceOptions) 
!= 0 { + headers.Set("x-oss-copy-source-range", o.CopySourceOptions) + } + if o.Headers != nil { + for k, v := range o.Headers { + newSlice := make([]string, len(v)) + copy(newSlice, v) + headers[k] = newSlice + } + } +} + +func makeXMLBuffer(doc []byte) *bytes.Buffer { + buf := new(bytes.Buffer) + buf.WriteString(xml.Header) + buf.Write(doc) + return buf +} + +type IndexDocument struct { + Suffix string `xml:"Suffix"` +} + +type ErrorDocument struct { + Key string `xml:"Key"` +} + +type RoutingRule struct { + ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` + RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` + RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` +} + +type RedirectAllRequestsTo struct { + HostName string `xml:"HostName"` + Protocol string `xml:"Protocol,omitempty"` +} + +type WebsiteConfiguration struct { + XMLName xml.Name `xml:"http://doc.oss-cn-hangzhou.aliyuncs.com WebsiteConfiguration"` + IndexDocument *IndexDocument `xml:"IndexDocument,omitempty"` + ErrorDocument *ErrorDocument `xml:"ErrorDocument,omitempty"` + RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` + RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"` +} + +// PutBucketWebsite configures a bucket as a website. 
+func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { + doc, err := xml.Marshal(configuration) + if err != nil { + return err + } + + buf := makeXMLBuffer(doc) + + return b.PutBucketSubresource("website", buf, int64(buf.Len())) +} + +func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(length, 10)) + + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: r, + params: url.Values{subresource: {""}}, + } + + return b.Client.query(req, nil) +} + +// Del removes an object from the bucket. +func (b *Bucket) Del(path string) error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: path, + } + return b.Client.query(req, nil) +} + +type Delete struct { + Quiet bool `xml:"Quiet,omitempty"` + Objects []Object `xml:"Object"` +} + +type Object struct { + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DelMulti removes up to 1000 objects from the bucket. +func (b *Bucket) DelMulti(objects Delete) error { + doc, err := xml.Marshal(objects) + if err != nil { + return err + } + + buf := makeXMLBuffer(doc) + digest := md5.New() + size, err := digest.Write(buf.Bytes()) + if err != nil { + return err + } + + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(int64(size), 10)) + headers.Set("Content-MD5", base64.StdEncoding.EncodeToString(digest.Sum(nil))) + headers.Set("Content-Type", "text/xml") + + req := &request{ + path: "/", + method: "POST", + params: url.Values{"delete": {""}}, + bucket: b.Name, + headers: headers, + payload: buf, + } + + return b.Client.query(req, nil) +} + +// The ListResp type holds the results of a List bucket operation. 
+type ListResp struct { + Name string + Prefix string + Delimiter string + Marker string + MaxKeys int + // IsTruncated is true if the results have been truncated because + // there are more keys and prefixes than can fit in MaxKeys. + // N.B. this is the opposite sense to that documented (incorrectly) in + // http://goo.gl/YjQTc + IsTruncated bool + Contents []Key + CommonPrefixes []string `xml:">Prefix"` + // if IsTruncated is true, pass NextMarker as marker argument to List() + // to get the next set of keys + NextMarker string +} + +// The Key type represents an item stored in an bucket. +type Key struct { + Key string + LastModified string + Type string + Size int64 + // ETag gives the hex-encoded MD5 sum of the contents, + // surrounded with double-quotes. + ETag string + StorageClass string + Owner Owner +} + +// List returns information about objects in an bucket. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// The marker parameter specifies the key to start with when listing objects +// in a bucket. OSS lists objects in alphabetical order and +// will return keys alphabetically greater than the marker. +// +// The max parameter specifies how many keys + common prefixes to return in +// the response. The default is 1000. 
+// +// For example, given these keys in a bucket: +// +// index.html +// index2.html +// photos/2006/January/sample.jpg +// photos/2006/February/sample2.jpg +// photos/2006/February/sample3.jpg +// photos/2006/February/sample4.jpg +// +// Listing this bucket with delimiter set to "/" would yield the +// following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Contents: []Key{ +// {Key: "index.html", "index2.html"}, +// }, +// CommonPrefixes: []string{ +// "photos/", +// }, +// } +// +// Listing the same bucket with delimiter set to "/" and prefix set to +// "photos/2006/" would yield the following result: +// +// &ListResp{ +// Name: "sample-bucket", +// MaxKeys: 1000, +// Delimiter: "/", +// Prefix: "photos/2006/", +// CommonPrefixes: []string{ +// "photos/2006/February/", +// "photos/2006/January/", +// }, +// } +// +func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) { + params := make(url.Values) + params.Set("prefix", prefix) + params.Set("delimiter", delim) + params.Set("marker", marker) + if max != 0 { + params.Set("max-keys", strconv.FormatInt(int64(max), 10)) + } + req := &request{ + bucket: b.Name, + params: params, + } + result = &ListResp{} + for attempt := attempts.Start(); attempt.Next(); { + err = b.Client.query(req, result) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + // if NextMarker is not returned, it should be set to the name of last key, + // so let's do it so that each caller doesn't have to + if result.IsTruncated && result.NextMarker == "" { + n := len(result.Contents) + if n > 0 { + result.NextMarker = result.Contents[n-1].Key + } + } + return result, nil +} + +//// The VersionsResp type holds the results of a list bucket Versions operation. 
+//type VersionsResp struct { +// Name string +// Prefix string +// KeyMarker string +// VersionIdMarker string +// MaxKeys int +// Delimiter string +// IsTruncated bool +// Versions []Version `xml:"Version"` +// CommonPrefixes []string `xml:">Prefix"` +//} + +//// The Version type represents an object version stored in an bucket. +//type Version struct { +// Key string +// VersionId string +// IsLatest bool +// LastModified string +// // ETag gives the hex-encoded MD5 sum of the contents, +// // surrounded with double-quotes. +// ETag string +// Size int64 +// Owner Owner +// StorageClass string +//} + +//func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { +// params := url.Values{} +// params.Set("versions", "") +// params.Set("prefix", prefix) +// params.Set("delimiter", delim) + +// if len(versionIdMarker) != 0 { +// params["version-id-marker"] = []string{versionIdMarker} +// } +// if len(keyMarker) != 0 { +// params["key-marker"] = []string{keyMarker} +// } + +// if max != 0 { +// params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} +// } +// req := &request{ +// bucket: b.Name, +// params: params, +// } +// result = &VersionsResp{} +// for attempt := attempts.Start(); attempt.Next(); { +// err = b.Client.query(req, result) +// if !shouldRetry(err) { +// break +// } +// } +// if err != nil { +// return nil, err +// } +// return result, nil +//} + +type GetLocationResp struct { + Location string `xml:",innerxml"` +} + +func (b *Bucket) Location() (string, error) { + params := make(url.Values) + params.Set("location", "") + r, err := b.GetWithParams("/", params) + + if err != nil { + return "", err + } + + // Parse the XML response. 
+ var resp GetLocationResp + if err = xml.Unmarshal(r, &resp); err != nil { + return "", err + } + + if resp.Location == "" { + return string(Hangzhou), nil + } + return resp.Location, nil +} + +func (b *Bucket) Path(path string) string { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return "/" + b.Name + path +} + +// URL returns a non-signed URL that allows retriving the +// object at path. It only works if the object is publicly +// readable (see SignedURL). +func (b *Bucket) URL(path string) string { + req := &request{ + bucket: b.Name, + path: path, + } + err := b.Client.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + u.RawQuery = "" + return u.String() +} + +// SignedURL returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURL(path string, expires time.Time) string { + return b.SignedURLWithArgs(path, expires, nil, nil) +} + +// SignedURLWithArgs returns a signed URL that allows anyone holding the URL +// to retrieve the object at path. The signature is valid until expires. +func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string { + return b.SignedURLWithMethod("GET", path, expires, params, headers) +} + +// SignedURLWithMethod returns a signed URL that allows anyone holding the URL +// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires. 
+func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string { + var uv = url.Values{} + + if params != nil { + uv = params + } + + uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10)) + uv.Set("OSSAccessKeyId", b.AccessKeyId) + + req := &request{ + method: method, + bucket: b.Name, + path: path, + params: uv, + headers: headers, + } + err := b.Client.prepare(req) + if err != nil { + panic(err) + } + u, err := req.url() + if err != nil { + panic(err) + } + + return u.String() +} + +// UploadSignedURL returns a signed URL that allows anyone holding the URL +// to upload the object at path. The signature is valid until expires. +// contenttype is a string like image/png +// name is the resource name in OSS terminology like images/ali.png [obviously excluding the bucket name itself] +func (b *Bucket) UploadSignedURL(name, method, contentType string, expires time.Time) string { + //TODO TESTING + expireDate := expires.Unix() + if method != "POST" { + method = "PUT" + } + + tokenData := "" + + stringToSign := method + "\n\n" + contentType + "\n" + strconv.FormatInt(expireDate, 10) + "\n" + tokenData + "/" + path.Join(b.Name, name) + secretKey := b.AccessKeySecret + accessId := b.AccessKeyId + mac := hmac.New(sha1.New, []byte(secretKey)) + mac.Write([]byte(stringToSign)) + macsum := mac.Sum(nil) + signature := base64.StdEncoding.EncodeToString([]byte(macsum)) + signature = strings.TrimSpace(signature) + + signedurl, err := url.Parse("https://" + b.Name + ".client.amazonaws.com/") + if err != nil { + log.Println("ERROR sining url for OSS upload", err) + return "" + } + signedurl.Path = name + params := url.Values{} + params.Add("OSSAccessKeyId", accessId) + params.Add("Expires", strconv.FormatInt(expireDate, 10)) + params.Add("Signature", signature) + + signedurl.RawQuery = params.Encode() + return signedurl.String() +} + +// PostFormArgsEx returns the action and input fields needed to allow anonymous +// 
uploads to a bucket within the expiration limit +// Additional conditions can be specified with conds +func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { + conditions := []string{} + fields = map[string]string{ + "AWSAccessKeyId": b.AccessKeyId, + "key": path, + } + + if conds != nil { + conditions = append(conditions, conds...) + } + + conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) + conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) + if redirect != "" { + conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) + fields["success_action_redirect"] = redirect + } + + vExpiration := expires.Format("2006-01-02T15:04:05Z") + vConditions := strings.Join(conditions, ",") + policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) + policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) + fields["policy"] = policy64 + + signer := hmac.New(sha1.New, []byte(b.AccessKeySecret)) + signer.Write([]byte(policy64)) + fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) + + action = fmt.Sprintf("%s/%s/", b.Client.Region, b.Name) + return +} + +// PostFormArgs returns the action and input fields needed to allow anonymous +// uploads to a bucket within the expiration limit +func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { + return b.PostFormArgsEx(path, expires, redirect, nil) +} + +type request struct { + method string + bucket string + path string + params url.Values + headers http.Header + baseurl string + payload io.Reader + prepared bool +} + +func (req *request) url() (*url.URL, error) { + u, err := url.Parse(req.baseurl) + if err != nil { + return nil, fmt.Errorf("bad OSS endpoint URL %q: %v", req.baseurl, err) + } + u.RawQuery = req.params.Encode() + u.Path 
= req.path + return u, nil +} + +// query prepares and runs the req request. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (client *Client) query(req *request, resp interface{}) error { + err := client.prepare(req) + if err != nil { + return err + } + r, err := client.run(req, resp) + if r != nil && r.Body != nil { + r.Body.Close() + } + return err +} + +// Sets baseurl on req from bucket name and the region endpoint +func (client *Client) setBaseURL(req *request) error { + req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) + + return nil +} + +// partiallyEscapedPath partially escapes the OSS path allowing for all OSS REST API calls. +// +// Some commands including: +// GET Bucket acl http://goo.gl/aoXflF +// GET Bucket cors http://goo.gl/UlmBdx +// GET Bucket lifecycle http://goo.gl/8Fme7M +// GET Bucket policy http://goo.gl/ClXIo3 +// GET Bucket location http://goo.gl/5lh8RD +// GET Bucket Logging http://goo.gl/sZ5ckF +// GET Bucket notification http://goo.gl/qSSZKD +// GET Bucket tagging http://goo.gl/QRvxnM +// require the first character after the bucket name in the path to be a literal '?' and +// not the escaped hex representation '%3F'. +func partiallyEscapedPath(path string) string { + pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/") + if len(pathEscapedAndSplit) >= 3 { + if len(pathEscapedAndSplit[2]) >= 3 { + // Check for the one "?" that should not be escaped. + if pathEscapedAndSplit[2][0:3] == "%3F" { + pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:] + } + } + } + return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1) +} + +// prepare sets up req to be delivered to OSS. +func (client *Client) prepare(req *request) error { + // Copy so they can be mutated without affecting on retries. 
+ headers := copyHeader(req.headers) + params := make(url.Values) + + for k, v := range req.params { + params[k] = v + } + + req.params = params + req.headers = headers + + if !req.prepared { + req.prepared = true + if req.method == "" { + req.method = "GET" + } + + if !strings.HasPrefix(req.path, "/") { + req.path = "/" + req.path + } + + err := client.setBaseURL(req) + if err != nil { + return err + } + } + + req.headers.Set("Date", util.GetGMTime()) + client.signRequest(req) + + return nil +} + +// Prepares an *http.Request for doHttpRequest +func (client *Client) setupHttpRequest(req *request) (*http.Request, error) { + // Copy so that signing the http request will not mutate it + + u, err := req.url() + if err != nil { + return nil, err + } + u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path)) + + hreq := http.Request{ + URL: u, + Method: req.method, + ProtoMajor: 1, + ProtoMinor: 1, + Close: true, + Header: req.headers, + Form: req.params, + } + + contentLength := req.headers.Get("Content-Length") + + if contentLength != "" { + hreq.ContentLength, _ = strconv.ParseInt(contentLength, 10, 64) + req.headers.Del("Content-Length") + } + + if req.payload != nil { + hreq.Body = ioutil.NopCloser(req.payload) + } + + return &hreq, nil +} + +// doHttpRequest sends hreq and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. 
+func (client *Client) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) { + c := http.Client{ + Transport: &http.Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + deadline := time.Now().Add(client.ReadTimeout) + if client.ConnectTimeout > 0 { + c, err = net.DialTimeout(netw, addr, client.ConnectTimeout) + } else { + c, err = net.Dial(netw, addr) + } + if err != nil { + return + } + if client.ReadTimeout > 0 { + err = c.SetDeadline(deadline) + } + return + }, + Proxy: http.ProxyFromEnvironment, + }, + } + + hresp, err := c.Do(hreq) + if err != nil { + return nil, err + } + if client.debug { + log.Printf("%s %s %d\n", hreq.Method, hreq.URL.String(), hresp.StatusCode) + contentType := hresp.Header.Get("Content-Type") + if contentType == "application/xml" || contentType == "text/xml" { + dump, _ := httputil.DumpResponse(hresp, true) + log.Printf("} -> %s\n", dump) + } else { + log.Printf("Response Content-Type: %s\n", contentType) + } + } + if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 { + return nil, client.buildError(hresp) + } + if resp != nil { + err = xml.NewDecoder(hresp.Body).Decode(resp) + hresp.Body.Close() + + if client.debug { + log.Printf("aliyungo.oss> decoded xml into %#v", resp) + } + + } + return hresp, err +} + +// run sends req and returns the http response from the server. +// If resp is not nil, the XML data contained in the response +// body will be unmarshalled on it. +func (client *Client) run(req *request, resp interface{}) (*http.Response, error) { + if client.debug { + log.Printf("Running OSS request: %#v", req) + } + + hreq, err := client.setupHttpRequest(req) + if err != nil { + return nil, err + } + + return client.doHttpRequest(hreq, resp) +} + +// Error represents an error in an operation with OSS. +type Error struct { + StatusCode int // HTTP status code (200, 403, ...) + Code string // OSS error code ("UnsupportedOperation", ...) 
+ Message string // The human-oriented error message + BucketName string + RequestId string + HostId string +} + +func (e *Error) Error() string { + return e.Message +} + +func (client *Client) buildError(r *http.Response) error { + if client.debug { + log.Printf("got error (status code %v)", r.StatusCode) + data, err := ioutil.ReadAll(r.Body) + if err != nil { + log.Printf("\tread error: %v", err) + } else { + log.Printf("\tdata:\n%s\n\n", data) + } + r.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + } + + err := Error{} + // TODO return error if Unmarshal fails? + xml.NewDecoder(r.Body).Decode(&err) + r.Body.Close() + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + if client.debug { + log.Printf("err: %#v\n", err) + } + return &err +} + +func shouldRetry(err error) bool { + if err == nil { + return false + } + switch err { + case io.ErrUnexpectedEOF, io.EOF: + return true + } + switch e := err.(type) { + case *net.DNSError: + return true + case *net.OpError: + switch e.Op { + case "read", "write": + return true + } + case *url.Error: + // url.Error can be returned either by net/url if a URL cannot be + // parsed, or by net/http if the response is closed before the headers + // are received or parsed correctly. In that later case, e.Op is set to + // the HTTP method name with the first letter uppercased. We don't want + // to retry on POST operations, since those are not idempotent, all the + // other ones should be safe to retry. 
+ switch e.Op { + case "Get", "Put", "Delete", "Head": + return shouldRetry(e.Err) + default: + return false + } + case *Error: + switch e.Code { + case "InternalError", "NoSuchUpload", "NoSuchBucket": + return true + } + } + return false +} + +func hasCode(err error, code string) bool { + e, ok := err.(*Error) + return ok && e.Code == code +} + +func copyHeader(header http.Header) (newHeader http.Header) { + newHeader = make(http.Header) + for k, v := range header { + newSlice := make([]string, len(v)) + copy(newSlice, v) + newHeader[k] = newSlice + } + return +} + +type AccessControlPolicy struct { + Owner Owner + Grants []string `xml:"AccessControlList>Grant"` +} + +// ACL returns ACL of bucket +func (b *Bucket) ACL() (result *AccessControlPolicy, err error) { + + params := make(url.Values) + params.Set("acl", "") + + r, err := b.GetWithParams("/", params) + if err != nil { + return nil, err + } + + // Parse the XML response. + var resp AccessControlPolicy + if err = xml.Unmarshal(r, &resp); err != nil { + return nil, err + } + + return &resp, nil +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go new file mode 100644 index 000000000..13bc8768e --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client_test.go @@ -0,0 +1,211 @@ +package oss_test + +import ( + "bytes" + "io/ioutil" + //"net/http" + "testing" + "time" + + "github.com/denverdino/aliyungo/oss" +) + +var ( + //If you test on ECS, you can set the internal param to true + client = oss.NewOSSClient(TestRegion, false, TestAccessKeyId, TestAccessKeySecret, false) +) + +func TestCreateBucket(t *testing.T) { + + b := client.Bucket(TestBucket) + err := b.PutBucket(oss.Private) + if err != nil { + t.Errorf("Failed for PutBucket: %v", err) + } + t.Log("Wait a while for bucket creation ...") + time.Sleep(10 * time.Second) +} + +func TestHead(t *testing.T) { + + b := 
client.Bucket(TestBucket) + _, err := b.Head("name", nil) + + if err == nil { + t.Errorf("Failed for Head: %v", err) + } +} + +func TestPutObject(t *testing.T) { + const DISPOSITION = "attachment; filename=\"0x1a2b3c.jpg\"" + + b := client.Bucket(TestBucket) + err := b.Put("name", []byte("content"), "content-type", oss.Private, oss.Options{ContentDisposition: DISPOSITION}) + if err != nil { + t.Errorf("Failed for Put: %v", err) + } +} + +func TestGet(t *testing.T) { + + b := client.Bucket(TestBucket) + data, err := b.Get("name") + + if err != nil || string(data) != "content" { + t.Errorf("Failed for Get: %v", err) + } +} + +func TestURL(t *testing.T) { + + b := client.Bucket(TestBucket) + url := b.URL("name") + + t.Log("URL: ", url) + // /c.Assert(req.URL.Path, check.Equals, "/denverdino_test/name") +} + +func TestGetReader(t *testing.T) { + + b := client.Bucket(TestBucket) + rc, err := b.GetReader("name") + if err != nil { + t.Fatalf("Failed for GetReader: %v", err) + } + data, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil || string(data) != "content" { + t.Errorf("Failed for ReadAll: %v", err) + } +} + +func aTestGetNotFound(t *testing.T) { + + b := client.Bucket("non-existent-bucket") + _, err := b.Get("non-existent") + if err == nil { + t.Fatalf("Failed for TestGetNotFound: %v", err) + } + ossErr, _ := err.(*oss.Error) + if ossErr.StatusCode != 404 || ossErr.BucketName != "non-existent-bucket" { + t.Errorf("Failed for TestGetNotFound: %v", err) + } + +} + +func TestPutCopy(t *testing.T) { + b := client.Bucket(TestBucket) + t.Log("Source: ", b.Path("name")) + res, err := b.PutCopy("newname", oss.Private, oss.CopyOptions{}, + b.Path("name")) + if err == nil { + t.Logf("Copy result: %v", res) + } else { + t.Errorf("Failed for PutCopy: %v", err) + } +} + +func TestList(t *testing.T) { + + b := client.Bucket(TestBucket) + + data, err := b.List("n", "", "", 0) + if err != nil || len(data.Contents) != 2 { + t.Errorf("Failed for List: %v", err) + } else { + 
t.Logf("Contents = %++v", data) + } +} + +func TestListWithDelimiter(t *testing.T) { + + b := client.Bucket(TestBucket) + + data, err := b.List("photos/2006/", "/", "some-marker", 1000) + if err != nil || len(data.Contents) != 0 { + t.Errorf("Failed for List: %v", err) + } else { + t.Logf("Contents = %++v", data) + } + +} + +func TestPutReader(t *testing.T) { + + b := client.Bucket(TestBucket) + buf := bytes.NewBufferString("content") + err := b.PutReader("name", buf, int64(buf.Len()), "content-type", oss.Private, oss.Options{}) + if err != nil { + t.Errorf("Failed for PutReader: %v", err) + } + TestGetReader(t) +} + +func TestExists(t *testing.T) { + + b := client.Bucket(TestBucket) + result, err := b.Exists("name") + if err != nil || result != true { + t.Errorf("Failed for Exists: %v", err) + } +} + +func TestLocation(t *testing.T) { + b := client.Bucket(TestBucket) + result, err := b.Location() + + if err != nil || result != string(TestRegion) { + t.Errorf("Failed for Location: %v %s", err, result) + } +} + +func TestACL(t *testing.T) { + b := client.Bucket(TestBucket) + result, err := b.ACL() + + if err != nil { + t.Errorf("Failed for ACL: %v", err) + } else { + t.Logf("AccessControlPolicy: %++v", result) + } +} + +func TestDelObject(t *testing.T) { + + b := client.Bucket(TestBucket) + err := b.Del("name") + if err != nil { + t.Errorf("Failed for Del: %v", err) + } +} + +func TestDelMultiObjects(t *testing.T) { + + b := client.Bucket(TestBucket) + objects := []oss.Object{oss.Object{Key: "newname"}} + err := b.DelMulti(oss.Delete{ + Quiet: false, + Objects: objects, + }) + if err != nil { + t.Errorf("Failed for DelMulti: %v", err) + } +} + +func TestGetService(t *testing.T) { + bucketList, err := client.GetService() + if err != nil { + t.Errorf("Unable to get service: %v", err) + } else { + t.Logf("GetService: %++v", bucketList) + } +} + +func TestDelBucket(t *testing.T) { + + b := client.Bucket(TestBucket) + err := b.DelBucket() + if err != nil { + 
+		t.Errorf("Failed for DelBucket: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go
new file mode 100644
index 000000000..7c0d25495
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/config_test.go
@@ -0,0 +1,14 @@
+package oss_test
+
+import (
+	"github.com/denverdino/aliyungo/oss"
+)
+
+//Modify with your Access Key Id and Access Key Secret
+const (
+	TestAccessKeyId     = "MY_ACCESS_KEY_ID"
+	TestAccessKeySecret = "MY_ACCESS_KEY_SECRET"
+	TestIAmRich         = false
+	TestRegion          = oss.Beijing
+	TestBucket          = "denverdino"
+)
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go
new file mode 100644
index 000000000..ebdb0477a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/export.go
@@ -0,0 +1,23 @@
+package oss
+
+import (
+	"github.com/denverdino/aliyungo/util"
+)
+
+var originalStrategy = attempts
+
+func SetAttemptStrategy(s *util.AttemptStrategy) {
+	if s == nil {
+		attempts = originalStrategy
+	} else {
+		attempts = *s
+	}
+}
+
+func SetListPartsMax(n int) {
+	listPartsMax = n
+}
+
+func SetListMultiMax(n int) {
+	listMultiMax = n
+}
diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go
new file mode 100644
index 000000000..454d53b70
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go
@@ -0,0 +1,460 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"errors"
+	"io"
+	//"log"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Multi represents an unfinished multipart upload.
+//
+// Multipart uploads allow sending big objects in smaller chunks.
+// After all parts have been sent, the upload must be explicitly +// completed by calling Complete with the list of parts. + +type Multi struct { + Bucket *Bucket + Key string + UploadId string +} + +// That's the default. Here just for testing. +var listMultiMax = 1000 + +type listMultiResp struct { + NextKeyMarker string + NextUploadIdMarker string + IsTruncated bool + Upload []Multi + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// ListMulti returns the list of unfinished multipart uploads in b. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. You can use prefixes to separate a bucket into different +// groupings of keys (to get the feeling of folders, for example). +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { + params := make(url.Values) + params.Set("uploads", "") + params.Set("max-uploads", strconv.FormatInt(int64(listMultiMax), 10)) + params.Set("prefix", prefix) + params.Set("delimiter", delim) + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: b.Name, + params: params, + } + var resp listMultiResp + err := b.Client.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, nil, err + } + for i := range resp.Upload { + multi := &resp.Upload[i] + multi.Bucket = b + multis = append(multis, multi) + } + prefixes = append(prefixes, resp.CommonPrefixes...) 
+ if !resp.IsTruncated { + return multis, prefixes, nil + } + params.Set("key-marker", resp.NextKeyMarker) + params.Set("upload-id-marker", resp.NextUploadIdMarker) + attempt = attempts.Start() // Last request worked. + } + panic("unreachable") +} + +// Multi returns a multipart upload handler for the provided key +// inside b. If a multipart upload exists for key, it is returned, +// otherwise a new multipart upload is initiated with contType and perm. +func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { + multis, _, err := b.ListMulti(key, "") + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + for _, m := range multis { + if m.Key == key { + return m, nil + } + } + return b.InitMulti(key, contType, perm, options) +} + +// InitMulti initializes a new multipart upload at the provided +// key inside b and returns a value for manipulating it. +// +func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { + headers := make(http.Header) + headers.Set("Content-Length", "0") + headers.Set("Content-Type", contType) + headers.Set("x-oss-acl", string(perm)) + + options.addHeaders(headers) + params := make(url.Values) + params.Set("uploads", "") + req := &request{ + method: "POST", + bucket: b.Name, + path: key, + headers: headers, + params: params, + } + var err error + var resp struct { + UploadId string `xml:"UploadId"` + } + for attempt := attempts.Start(); attempt.Next(); { + err = b.Client.query(req, &resp) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil +} + +func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { + headers := make(http.Header) + headers.Set("x-oss-copy-source", source) + + options.addHeaders(headers) + params := make(url.Values) + params.Set("uploadId", m.UploadId) + params.Set("partNumber", 
strconv.FormatInt(int64(n), 10)) + + sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) + //log.Println("source: ", source) + //log.Println("sourceBucket: ", sourceBucket.Name) + //log.Println("HEAD: ", strings.Split(source, "/")[2]) + sourceMeta, err := sourceBucket.Head(strings.Split(source, "/")[2], nil) + if err != nil { + return nil, Part{}, err + } + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + } + resp := &CopyObjectResult{} + err = m.Bucket.Client.query(req, resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, Part{}, err + } + if resp.ETag == "" { + return nil, Part{}, errors.New("part upload succeeded with no ETag") + } + return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil + } + panic("unreachable") +} + +// PutPart sends part n of the multipart upload, reading all the content from r. +// Each part, except for the last one, must be at least 5MB in size. 
+// +func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64) +} + +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { + headers := make(http.Header) + headers.Set("Content-Length", strconv.FormatInt(partSize, 10)) + headers.Set("Content-MD5", md5b64) + + params := make(url.Values) + params.Set("uploadId", m.UploadId) + params.Set("partNumber", strconv.FormatInt(int64(n), 10)) + + for attempt := attempts.Start(); attempt.Next(); { + _, err := r.Seek(0, 0) + if err != nil { + return Part{}, err + } + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + payload: r, + } + err = m.Bucket.Client.prepare(req) + if err != nil { + return Part{}, err + } + resp, err := m.Bucket.Client.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return Part{}, err + } + etag := resp.Header.Get("ETag") + if etag == "" { + return Part{}, errors.New("part upload succeeded with no ETag") + } + return Part{n, etag, partSize}, nil + } + panic("unreachable") +} + +func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { + _, err = r.Seek(0, 0) + if err != nil { + return 0, "", "", err + } + digest := md5.New() + size, err = io.Copy(digest, r) + if err != nil { + return 0, "", "", err + } + sum := digest.Sum(nil) + md5hex = hex.EncodeToString(sum) + md5b64 = base64.StdEncoding.EncodeToString(sum) + return size, md5hex, md5b64, nil +} + +type Part struct { + N int `xml:"PartNumber"` + ETag string + Size int64 +} + +type partSlice []Part + +func (s partSlice) Len() int { return len(s) } +func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } +func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type listPartsResp struct { + NextPartNumberMarker string + 
+	IsTruncated          bool
+	Part                 []Part
+}
+
+// That's the default. Here just for testing.
+var listPartsMax = 1000
+
+// ListParts is kept for backward compatibility. See the documentation for ListPartsFull.
+func (m *Multi) ListParts() ([]Part, error) {
+	return m.ListPartsFull(0, listPartsMax)
+}
+
+// ListPartsFull returns the list of previously uploaded parts in m,
+// ordered by part number (Only parts with higher part numbers than
+// partNumberMarker will be listed). Only up to maxParts parts will be
+// returned.
+//
+func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
+	if maxParts > listPartsMax {
+		maxParts = listPartsMax
+	}
+
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+	params.Set("max-parts", strconv.FormatInt(int64(maxParts), 10))
+	params.Set("part-number-marker", strconv.FormatInt(int64(partNumberMarker), 10))
+
+	var parts partSlice
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method: "GET",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		var resp listPartsResp
+		err := m.Bucket.Client.query(req, &resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, resp.Part...)
+		if !resp.IsTruncated {
+			sort.Sort(parts)
+			return parts, nil
+		}
+		params.Set("part-number-marker", resp.NextPartNumberMarker)
+		attempt = attempts.Start() // Last request worked.
+	}
+	panic("unreachable")
+}
+
+type ReaderAtSeeker interface {
+	io.ReaderAt
+	io.ReadSeeker
+}
+
+// PutAll sends all of r via a multipart upload with parts no larger
+// than partSize bytes, which must be set to at least 5MB.
+// Parts previously uploaded are either reused if their checksum
+// and size match the new part, or otherwise overwritten with the
+// new content.
+// PutAll returns all the parts of m (reused or not).
+func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { + old, err := m.ListParts() + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + reuse := 0 // Index of next old part to consider reusing. + current := 1 // Part number of latest good part handled. + totalSize, err := r.Seek(0, 2) + if err != nil { + return nil, err + } + first := true // Must send at least one empty part if the file is empty. + var result []Part +NextSection: + for offset := int64(0); offset < totalSize || first; offset += partSize { + first = false + if offset+partSize > totalSize { + partSize = totalSize - offset + } + section := io.NewSectionReader(r, offset, partSize) + _, md5hex, md5b64, err := seekerInfo(section) + if err != nil { + return nil, err + } + for reuse < len(old) && old[reuse].N <= current { + // Looks like this part was already sent. + part := &old[reuse] + etag := `"` + md5hex + `"` + if part.N == current && part.Size == partSize && part.ETag == etag { + // Checksum matches. Reuse the old part. + result = append(result, *part) + current++ + continue NextSection + } + reuse++ + } + + // Part wasn't found or doesn't match. Send it. + part, err := m.putPart(current, section, partSize, md5b64) + if err != nil { + return nil, err + } + result = append(result, part) + current++ + } + return result, nil +} + +type completeUpload struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts completeParts `xml:"Part"` +} + +type completePart struct { + PartNumber int + ETag string +} + +type completeParts []completePart + +func (p completeParts) Len() int { return len(p) } +func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } +func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Complete assembles the given previously uploaded parts into the +// final object. This operation may take several minutes. 
+//
+func (m *Multi) Complete(parts []Part) error {
+	params := make(url.Values)
+	params.Set("uploadId", m.UploadId)
+
+	c := completeUpload{}
+	for _, p := range parts {
+		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
+	}
+	sort.Sort(c.Parts)
+	data, err := xml.Marshal(&c)
+	if err != nil {
+		return err
+	}
+	for attempt := attempts.Start(); attempt.Next(); {
+		req := &request{
+			method:  "POST",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			params:  params,
+			payload: bytes.NewReader(data),
+		}
+		err := m.Bucket.Client.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
+
+// Abort deletes an unfinished multipart upload and any previously
+// uploaded parts for it.
+//
+// After a multipart upload is aborted, no additional parts can be
+// uploaded using it. However, if any part uploads are currently in
+// progress, those part uploads might or might not succeed. As a result,
+// it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// NOTE: If the described scenario happens to you, please report back to
+// the goamz authors with details. In the future such retrying should be
+// handled internally, but it's not clear what happens precisely (Is an
+// error returned? Is the issue completely undetectable?).
+// +func (m *Multi) Abort() error { + params := make(url.Values) + params.Set("uploadId", m.UploadId) + + for attempt := attempts.Start(); attempt.Next(); { + req := &request{ + method: "DELETE", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + err := m.Bucket.Client.query(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + return err + } + panic("unreachable") +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go new file mode 100644 index 000000000..6ecd63beb --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi_test.go @@ -0,0 +1,161 @@ +package oss_test + +import ( + //"encoding/xml" + "github.com/denverdino/aliyungo/oss" + "testing" + //"io" + //"io/ioutil" + "strings" +) + +func TestCreateBucketMulti(t *testing.T) { + TestCreateBucket(t) +} + +func TestInitMulti(t *testing.T) { + b := client.Bucket(TestBucket) + + metadata := make(map[string][]string) + metadata["key1"] = []string{"value1"} + metadata["key2"] = []string{"value2"} + options := oss.Options{ + ServerSideEncryption: true, + Meta: metadata, + ContentEncoding: "text/utf8", + CacheControl: "no-cache", + ContentMD5: "0000000000000000", + } + + multi, err := b.InitMulti("multi", "text/plain", oss.Private, options) + if err != nil { + t.Errorf("Failed for InitMulti: %v", err) + } else { + t.Logf("InitMulti result: %++v", multi) + } +} + +func TestMultiReturnOld(t *testing.T) { + + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Errorf("Failed for Multi: %v", err) + } else { + t.Logf("Multi result: %++v", multi) + } + +} + +func TestPutPart(t *testing.T) { + + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Fatalf("Failed for Multi: %v", err) + } + + part, err := 
multi.PutPart(1, strings.NewReader("")) + if err != nil { + t.Errorf("Failed for PutPart: %v", err) + } else { + t.Logf("PutPart result: %++v", part) + } + +} +func TestPutPartCopy(t *testing.T) { + + TestPutObject(t) + + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Fatalf("Failed for Multi: %v", err) + } + + res, part, err := multi.PutPartCopy(2, oss.CopyOptions{}, b.Path("name")) + if err != nil { + t.Errorf("Failed for PutPartCopy: %v", err) + } else { + t.Logf("PutPartCopy result: %++v %++v", part, res) + } + TestDelObject(t) +} + +func TestListParts(t *testing.T) { + + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Fatalf("Failed for Multi: %v", err) + } + + parts, err := multi.ListParts() + if err != nil { + t.Errorf("Failed for ListParts: %v", err) + } else { + t.Logf("ListParts result: %++v", parts) + } +} +func TestListMulti(t *testing.T) { + + b := client.Bucket(TestBucket) + + multis, prefixes, err := b.ListMulti("", "/") + if err != nil { + t.Errorf("Failed for ListMulti: %v", err) + } else { + t.Logf("ListMulti result : %++v %++v", multis, prefixes) + } +} +func TestMultiAbort(t *testing.T) { + + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Fatalf("Failed for Multi: %v", err) + } + + err = multi.Abort() + if err != nil { + t.Errorf("Failed for Abort: %v", err) + } + +} + +func TestPutAll(t *testing.T) { + TestInitMulti(t) + // Don't retry the NoSuchUpload error. + b := client.Bucket(TestBucket) + + multi, err := b.Multi("multi", "text/plain", oss.Private, oss.Options{}) + if err != nil { + t.Fatalf("Failed for Multi: %v", err) + } + + // Must send at least one part, so that completing it will work. 
+ parts, err := multi.PutAll(strings.NewReader("part1part2last"), 5) + if err != nil { + t.Errorf("Failed for PutAll: %v", err) + } else { + t.Logf("PutAll result: %++v", parts) + } + // // Must send at least one part, so that completing it will work. + // err = multi.Complete(parts) + // if err != nil { + // t.Errorf("Failed for Complete: %v", err) + // } + err = multi.Abort() + if err != nil { + t.Errorf("Failed for Abort: %v", err) + } +} + +func TestCleanUp(t *testing.T) { + TestDelBucket(t) +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go new file mode 100644 index 000000000..2bba73827 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/regions.go @@ -0,0 +1,53 @@ +package oss + +import ( + "fmt" +) + +// Region represents OSS region +type Region string + +// Constants of region definition +const ( + Hangzhou = Region("oss-cn-hangzhou") + Qingdao = Region("oss-cn-qingdao") + Beijing = Region("oss-cn-beijing") + Hongkong = Region("oss-cn-hongkong") + Shenzhen = Region("oss-cn-shenzhen") + USWest1 = Region("oss-us-west-1") + DefaultRegion = Hangzhou +) + +// GetEndpoint returns endpoint of region +func (r Region) GetEndpoint(internal bool, bucket string, secure bool) string { + if internal { + return r.GetInternalEndpoint(bucket, secure) + } + return r.GetInternetEndpoint(bucket, secure) +} + +func getProtocol(secure bool) string { + protocol := "http" + if secure { + protocol = "https" + } + return protocol +} + +// GetInternetEndpoint returns internet endpoint of region +func (r Region) GetInternetEndpoint(bucket string, secure bool) string { + protocol := getProtocol(secure) + if bucket == "" { + return fmt.Sprintf("%s://oss.aliyuncs.com", protocol) + } + return fmt.Sprintf("%s://%s.%s.aliyuncs.com", protocol, bucket, string(r)) +} + +// GetInternalEndpoint returns internal endpoint of region +func (r Region) 
GetInternalEndpoint(bucket string, secure bool) string { + protocol := getProtocol(secure) + if bucket == "" { + return fmt.Sprintf("%s://oss-internal.aliyuncs.com", protocol) + } + return fmt.Sprintf("%s://%s.%s-internal.aliyuncs.com", protocol, bucket, string(r)) +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go new file mode 100644 index 000000000..a261644a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/signature.go @@ -0,0 +1,105 @@ +package oss + +import ( + "github.com/denverdino/aliyungo/util" + //"log" + "net/http" + "net/url" + "sort" + "strings" +) + +const HeaderOSSPrefix = "x-oss-" + +var ossParamsToSign = map[string]bool{ + "acl": true, + "delete": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, +} + +func (client *Client) signRequest(request *request) { + query := request.params + + urlSignature := query.Get("OSSAccessKeyId") != "" + + headers := request.headers + contentMd5 := headers.Get("Content-Md5") + contentType := headers.Get("Content-Type") + date := "" + if urlSignature { + date = query.Get("Expires") + } else { + date = headers.Get("Date") + } + + resource := request.path + if request.bucket != "" { + resource = "/" + request.bucket + request.path + } + params := make(url.Values) + for k, v := range query { + if ossParamsToSign[k] { + params[k] = v + } + } + + if len(params) > 0 { + resource = resource + "?" 
+ util.Encode(params) + } + + canonicalizedResource := resource + + _, canonicalizedHeader := canonicalizeHeader(headers) + + stringToSign := request.method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedHeader + canonicalizedResource + + //log.Println("stringToSign: ", stringToSign) + signature := util.CreateSignature(stringToSign, client.AccessKeySecret) + + if query.Get("OSSAccessKeyId") != "" { + query.Set("Signature", signature) + } else { + headers.Set("Authorization", "OSS "+client.AccessKeyId+":"+signature) + } +} + +//Have to break the abstraction to append keys with lower case. +func canonicalizeHeader(headers http.Header) (newHeaders http.Header, result string) { + var canonicalizedHeaders []string + newHeaders = http.Header{} + + for k, v := range headers { + if lower := strings.ToLower(k); strings.HasPrefix(lower, HeaderOSSPrefix) { + newHeaders[lower] = v + canonicalizedHeaders = append(canonicalizedHeaders, lower) + } else { + newHeaders[k] = v + } + } + + sort.Strings(canonicalizedHeaders) + + var canonicalizedHeader string + + for _, k := range canonicalizedHeaders { + canonicalizedHeader += k + ":" + headers.Get(k) + "\n" + } + return newHeaders, canonicalizedHeader +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go new file mode 100644 index 000000000..e71ed19f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go @@ -0,0 +1,74 @@ +package util + +import ( + "time" +) + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other goamz packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. 
+ Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. 
+func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go new file mode 100644 index 000000000..50e9be7a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt_test.go @@ -0,0 +1,90 @@ +package util + +import ( + "testing" + "time" +) + +func TestAttemptTiming(t *testing.T) { + testAttempt := AttemptStrategy{ + Total: 0.25e9, + Delay: 0.1e9, + } + want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9} + got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing + t0 := time.Now() + for a := testAttempt.Start(); a.Next(); { + got = append(got, time.Now().Sub(t0)) + } + got = append(got, time.Now().Sub(t0)) + if len(got) != len(want) { + t.Fatalf("Failed!") + } + const margin = 0.01e9 + for i, got := range want { + lo := want[i] - margin + hi := want[i] + margin + if got < lo || got > hi { + t.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds()) + } + } +} + +func TestAttemptNextHasNext(t *testing.T) { + a := AttemptStrategy{}.Start() + if !a.Next() { + t.Fatalf("Failed!") + } + if a.Next() { + t.Fatalf("Failed!") + } + + a = AttemptStrategy{}.Start() + if !a.Next() { + t.Fatalf("Failed!") + } + if a.HasNext() { + t.Fatalf("Failed!") + } + if a.Next() { + t.Fatalf("Failed!") + } + a = AttemptStrategy{Total: 2e8}.Start() + + if !a.Next() { + t.Fatalf("Failed!") + } + if !a.HasNext() { + t.Fatalf("Failed!") + } + time.Sleep(2e8) + + if !a.HasNext() { + t.Fatalf("Failed!") + } + if !a.Next() { + t.Fatalf("Failed!") + } + if a.Next() { + t.Fatalf("Failed!") + } + + a = AttemptStrategy{Total: 1e8, Min: 2}.Start() + time.Sleep(1e8) + + if !a.Next() { + t.Fatalf("Failed!") + } + if 
!a.HasNext() { + t.Fatalf("Failed!") + } + if !a.Next() { + t.Fatalf("Failed!") + } + if a.HasNext() { + t.Fatalf("Failed!") + } + if a.Next() { + t.Fatalf("Failed!") + } +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go new file mode 100644 index 000000000..54a635691 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go @@ -0,0 +1,113 @@ +package util + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strconv" + "time" +) + +//ConvertToQueryValues converts the struct to url.Values +func ConvertToQueryValues(ifc interface{}) url.Values { + values := url.Values{} + SetQueryValues(ifc, &values) + return values +} + +//SetQueryValues sets the struct to existing url.Values following ECS encoding rules +func SetQueryValues(ifc interface{}, values *url.Values) { + setQueryValues(ifc, values, "") +} + +func setQueryValues(i interface{}, values *url.Values, prefix string) { + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + fieldName := elemType.Field(i).Name + field := elem.Field(i) + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + } + var value string + switch field.Interface().(type) { + case int, int8, int16, int32, int64: + i := field.Int() + if i != 0 { + value = strconv.FormatInt(i, 10) + } + case uint, uint8, uint16, uint32, uint64: + i := field.Uint() + if i != 0 { + value = strconv.FormatUint(i, 10) + } + case float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case float64: + value = strconv.FormatFloat(field.Float(), 'f', 
4, 64) + case []byte: + value = string(field.Bytes()) + case bool: + value = strconv.FormatBool(field.Bool()) + case string: + value = field.String() + case []string: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + + default: + if kind == reflect.Slice { //Array of structs + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValues(ifc, values, prefixName) + } + } + } else { + ifc := field.Interface() + if ifc != nil { + SetQueryValues(ifc, values) + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + values.Set(name, value) + } + } +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go new file mode 100644 index 000000000..a49eb215f --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go @@ -0,0 +1,46 @@ +package util + +import ( + "testing" + "time" +) + +type SubStruct struct { + A string + B int +} + +type TestStruct struct { + Format string + Version string + AccessKeyId string + Timestamp time.Time + Empty string + IntValue int `ArgName:"int-value"` + BoolPtr *bool `ArgName:"bool-ptr"` + IntPtr *int `ArgName:"int-ptr"` + StringArray []string `ArgName:"str-array"` + StructArray []SubStruct +} + +func TestConvertToQueryValues(t *testing.T) { + boolValue := true + request := TestStruct{ + Format: "JSON", + Version: 
"1.0", + Timestamp: time.Date(2015, time.Month(5), 26, 1, 2, 3, 4, time.UTC), + IntValue: 10, + BoolPtr: &boolValue, + StringArray: []string{"abc", "xyz"}, + StructArray: []SubStruct{ + SubStruct{A: "a", B: 1}, + SubStruct{A: "x", B: 2}, + }, + } + result := ConvertToQueryValues(&request).Encode() + const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D" + if result != expectedResult { + t.Error("Incorrect encoding: ", result) + } + +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go new file mode 100644 index 000000000..121d6e62f --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go @@ -0,0 +1,62 @@ +package util + +import ( + "fmt" + "time" +) + +// GetISO8601TimeStamp gets timestamp string in ISO8601 format +func GetISO8601TimeStamp(ts time.Time) string { + t := ts.UTC() + return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +const formatISO8601 = "2006-01-02T15:04:05Z" +const jsonFormatISO8601 = `"` + formatISO8601 + `"` + +// A ISO6801Time represents a time in ISO8601 format +type ISO6801Time time.Time + +// New constructs a new iso8601.Time instance from an existing +// time.Time instance. This causes the nanosecond field to be set to +// 0, and its time zone set to a fixed zone with no offset from UTC +// (but it is *not* UTC itself). 
+func New(t time.Time) ISO6801Time { + return ISO6801Time(time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + 0, + time.UTC, + )) +} + +// IsDefault checks if the time is default +func (it *ISO6801Time) IsDefault() bool { + return *it == ISO6801Time{} +} + +// MarshalJSON serializes the ISO6801Time into JSON string +func (it ISO6801Time) MarshalJSON() ([]byte, error) { + return []byte(time.Time(it).Format(jsonFormatISO8601)), nil +} + +// UnmarshalJSON deserializes the ISO6801Time from JSON string +func (it *ISO6801Time) UnmarshalJSON(data []byte) error { + if string(data) == "\"\"" { + return nil + } + t, err := time.ParseInLocation(jsonFormatISO8601, string(data), time.UTC) + if err == nil { + *it = ISO6801Time(t) + } + return err +} + +// String returns the time in ISO6801Time format +func (it ISO6801Time) String() string { + return time.Time(it).String() +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go new file mode 100644 index 000000000..284a23c18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go @@ -0,0 +1,50 @@ +package util + +import ( + "encoding/json" + "testing" + "time" +) + +func TestISO8601Time(t *testing.T) { + now := New(time.Now().UTC()) + + data, err := json.Marshal(now) + if err != nil { + t.Fatal(err) + } + + _, err = time.Parse(`"`+formatISO8601+`"`, string(data)) + if err != nil { + t.Fatal(err) + } + + var now2 ISO6801Time + err = json.Unmarshal(data, &now2) + if err != nil { + t.Fatal(err) + } + + if now != now2 { + t.Fatalf("Time %s does not equal expected %s", now2, now) + } + + if now.String() != now2.String() { + t.Fatalf("String format for %s does not equal expected %s", now2, now) + } + + type TestTimeStruct struct { + A int + B *ISO6801Time + } + var testValue TestTimeStruct + err = json.Unmarshal([]byte("{\"A\": 1, \"B\":\"\"}"), 
&testValue) + if err != nil { + t.Fatal(err) + } + t.Logf("%v", testValue) + if !testValue.B.IsDefault() { + t.Fatal("Invaid Unmarshal result for ISO6801Time from empty value") + } + +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go new file mode 100644 index 000000000..a00b27c19 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature.go @@ -0,0 +1,40 @@ +package util + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/url" + "strings" +) + +//CreateSignature creates signature for string following Aliyun rules +func CreateSignature(stringToSignature, accessKeySecret string) string { + // Crypto by HMAC-SHA1 + hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret)) + hmacSha1.Write([]byte(stringToSignature)) + sign := hmacSha1.Sum(nil) + + // Encode to Base64 + base64Sign := base64.StdEncoding.EncodeToString(sign) + + return base64Sign +} + +func percentReplace(str string) string { + str = strings.Replace(str, "+", "%20", -1) + str = strings.Replace(str, "*", "%2A", -1) + str = strings.Replace(str, "%7E", "~", -1) + + return str +} + +// CreateSignatureForRequest creates signature for query string values +func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string { + + canonicalizedQueryString := percentReplace(values.Encode()) + + stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) + + return CreateSignature(stringToSign, accessKeySecret) +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go new file mode 100644 index 000000000..e5c22ccac --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/signature_test.go @@ -0,0 +1,14 @@ +package util + +import ( + "testing" +) + +func TestCreateSignature(t *testing.T) 
{ + + str := "GET&%2F&AccessKeyId%3Dtestid%26Action%3DDescribeRegions%26Format%3DXML%26RegionId%3Dregion1%26SignatureMethod%3DHMAC-SHA1%26SignatureNonce%3DNwDAxvLU6tFE0DVb%26SignatureVersion%3D1.0%26TimeStamp%3D2012-12-26T10%253A33%253A56Z%26Version%3D2014-05-26" + + signature := CreateSignature(str, "testsecret") + + t.Log(signature) +} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go new file mode 100644 index 000000000..f28266842 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go @@ -0,0 +1,54 @@ +package util + +import ( + "bytes" + "math/rand" + "net/http" + "net/url" + "sort" + "strconv" + "time" +) + +//CreateRandomString create random string +func CreateRandomString() string { + + rand.Seed(time.Now().UnixNano()) + randInt := rand.Int63() + randStr := strconv.FormatInt(randInt, 36) + + return randStr +} + +// Encode encodes the values into ``URL encoded'' form +// ("acl&bar=baz&foo=quux") sorted by key. 
+func Encode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + if v != "" { + buf.WriteString("=") + buf.WriteString(url.QueryEscape(v)) + } + } + } + return buf.String() +} + +func GetGMTime() string { + return time.Now().UTC().Format(http.TimeFormat) +} From 3d30cb38f6d29a7070626265727593e3d1e8e9f1 Mon Sep 17 00:00:00 2001 From: tgic Date: Mon, 15 Jun 2015 20:03:32 +0800 Subject: [PATCH 05/20] add endpoint support --- Godeps/Godeps.json | 4 +- .../denverdino/aliyungo/oss/client.go | 17 ++- .../denverdino/aliyungo/util/attempt.go | 4 +- .../denverdino/aliyungo/util/encoding.go | 68 ++++++----- .../denverdino/aliyungo/util/encoding_test.go | 8 +- .../denverdino/aliyungo/util/iso6801.go | 4 +- .../denverdino/aliyungo/util/iso6801_test.go | 6 +- .../denverdino/aliyungo/util/util.go | 107 ++++++++++++++++++ .../denverdino/aliyungo/util/util_test.go | 98 ++++++++++++++++ registry/storage/driver/oss/oss.go | 8 ++ registry/storage/driver/oss/oss_test.go | 20 ++-- 11 files changed, 295 insertions(+), 49 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go mode change 100755 => 100644 registry/storage/driver/oss/oss.go mode change 100755 => 100644 registry/storage/driver/oss/oss_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d21f52608..6c03f5446 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -46,11 +46,11 @@ }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb" + "Rev": "56047189188d6558b6a1456d647970032343b511" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "17d1e888c907ffdbd875f37500f3d130ce2ee6eb" + "Rev": 
"56047189188d6558b6a1456d647970032343b511" }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go index 8e38ea4ae..d67c4d377 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go @@ -35,7 +35,9 @@ type Client struct { Secure bool ConnectTimeout time.Duration ReadTimeout time.Duration - debug bool + + endpoint string + debug bool } // The Bucket type encapsulates operations with an bucket. @@ -159,6 +161,12 @@ func (client *Client) locationConstraint() io.Reader { return strings.NewReader(constraint) } +// override default endpoint +func (client *Client) SetEndpoint(endpoint string) { + // TODO check endpoint + client.endpoint = endpoint +} + // PutBucket creates a new bucket. func (b *Bucket) PutBucket(perm ACL) error { headers := make(http.Header) @@ -963,7 +971,12 @@ func (client *Client) query(req *request, resp interface{}) error { // Sets baseurl on req from bucket name and the region endpoint func (client *Client) setBaseURL(req *request) error { - req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) + + if client.endpoint == "" { + req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) + } else { + req.baseurl = fmt.Sprintf("%s://%s", client.endpoint, getProtocol(client.Secure)) + } return nil } diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go index e71ed19f3..2d07f03a8 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/attempt.go @@ -4,9 +4,11 @@ import ( "time" ) +// AttemptStrategy is reused from the goamz package + // AttemptStrategy represents a 
strategy for waiting for an action // to complete successfully. This is an internal type used by the -// implementation of other goamz packages. +// implementation of other packages. type AttemptStrategy struct { Total time.Duration // total duration of attempt. Delay time.Duration // interval between each try in the burst. diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go index 54a635691..56b900afa 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding.go @@ -39,49 +39,48 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { } if kind == reflect.Ptr { field = field.Elem() + kind = field.Kind() } var value string - switch field.Interface().(type) { - case int, int8, int16, int32, int64: + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i := field.Int() if i != 0 { value = strconv.FormatInt(i, 10) } - case uint, uint8, uint16, uint32, uint64: + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: i := field.Uint() if i != 0 { value = strconv.FormatUint(i, 10) } - case float32: + case reflect.Float32: value = strconv.FormatFloat(field.Float(), 'f', 4, 32) - case float64: + case reflect.Float64: value = strconv.FormatFloat(field.Float(), 'f', 4, 64) - case []byte: - value = string(field.Bytes()) - case bool: + case reflect.Bool: value = strconv.FormatBool(field.Bool()) - case string: + case reflect.String: value = field.String() - case []string: - l := field.Len() - if l > 0 { - strArray := make([]string, l) - for i := 0; i < l; i++ { - strArray[i] = field.Index(i).String() + case reflect.Slice: + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 
0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } } - bytes, err := json.Marshal(strArray) - if err == nil { - value = string(bytes) - } else { - log.Printf("Failed to convert JSON: %v", err) - } - } - case time.Time: - t := field.Interface().(time.Time) - value = GetISO8601TimeStamp(t) - - default: - if kind == reflect.Slice { //Array of structs + default: l := field.Len() for j := 0; j < l; j++ { prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) @@ -91,7 +90,18 @@ func setQueryValues(i interface{}, values *url.Values, prefix string) { setQueryValues(ifc, values, prefixName) } } - } else { + continue + } + + default: + switch field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: ifc := field.Interface() if ifc != nil { SetQueryValues(ifc, values) diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go index a49eb215f..049cd86b3 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/encoding_test.go @@ -5,6 +5,8 @@ import ( "time" ) +type TestString string + type SubStruct struct { A string B int @@ -21,6 +23,8 @@ type TestStruct struct { IntPtr *int `ArgName:"int-ptr"` StringArray []string `ArgName:"str-array"` StructArray []SubStruct + test TestString + tests []TestString } func TestConvertToQueryValues(t *testing.T) { @@ -36,9 +40,11 @@ func TestConvertToQueryValues(t *testing.T) { SubStruct{A: "a", B: 1}, SubStruct{A: "x", B: 2}, }, + test: TestString("test"), + tests: []TestString{TestString("test1"), 
TestString("test2")}, } result := ConvertToQueryValues(&request).Encode() - const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D" + const expectedResult = "Format=JSON&StructArray.1.A=a&StructArray.1.B=1&StructArray.2.A=x&StructArray.2.B=2&Timestamp=2015-05-26T01%3A02%3A03Z&Version=1.0&bool-ptr=true&int-value=10&str-array=%5B%22abc%22%2C%22xyz%22%5D&test=test&tests=%5B%22test1%22%2C%22test2%22%5D" if result != expectedResult { t.Error("Incorrect encoding: ", result) } diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go index 121d6e62f..031b61751 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801.go @@ -21,7 +21,7 @@ type ISO6801Time time.Time // time.Time instance. This causes the nanosecond field to be set to // 0, and its time zone set to a fixed zone with no offset from UTC // (but it is *not* UTC itself). 
-func New(t time.Time) ISO6801Time { +func NewISO6801Time(t time.Time) ISO6801Time { return ISO6801Time(time.Date( t.Year(), t.Month(), @@ -58,5 +58,5 @@ func (it *ISO6801Time) UnmarshalJSON(data []byte) error { // String returns the time in ISO6801Time format func (it ISO6801Time) String() string { - return time.Time(it).String() + return time.Time(it).Format(formatISO8601) } diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go index 284a23c18..f2ba96a45 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/iso6801_test.go @@ -7,7 +7,7 @@ import ( ) func TestISO8601Time(t *testing.T) { - now := New(time.Now().UTC()) + now := NewISO6801Time(time.Now().UTC()) data, err := json.Marshal(now) if err != nil { @@ -26,7 +26,7 @@ func TestISO8601Time(t *testing.T) { } if now != now2 { - t.Fatalf("Time %s does not equal expected %s", now2, now) + t.Errorf("Time %s does not equal expected %s", now2, now) } if now.String() != now2.String() { @@ -46,5 +46,5 @@ func TestISO8601Time(t *testing.T) { if !testValue.B.IsDefault() { t.Fatal("Invaid Unmarshal result for ISO6801Time from empty value") } - + t.Logf("ISO6801Time String(): %s", now2.String()) } diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go index f28266842..3e21abfcb 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go @@ -2,12 +2,20 @@ package util import ( "bytes" + srand "crypto/rand" + "encoding/binary" "math/rand" "net/http" "net/url" "sort" "strconv" "time" + "errors" +) + + +const ( + StatusUnKnown = "NA" ) //CreateRandomString create random string @@ -52,3 +60,102 @@ func Encode(v url.Values) string { func 
GetGMTime() string { return time.Now().UTC().Format(http.TimeFormat) } + +// + +func randUint32() uint32 { + return randUint32Slice(1)[0] +} + +func randUint32Slice(c int) []uint32 { + b := make([]byte, c*4) + + _, err := srand.Read(b) + + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i := range b { + b[i] = byte(rand.Int()) + } + } + + n := make([]uint32, c) + + for i := range n { + n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4]) + } + + return n +} + +func toByte(n uint32, st, ed byte) byte { + return byte(n%uint32(ed-st+1) + uint32(st)) +} + +func toDigit(n uint32) byte { + return toByte(n, '0', '9') +} + +func toLowerLetter(n uint32) byte { + return toByte(n, 'a', 'z') +} + +func toUpperLetter(n uint32) byte { + return toByte(n, 'A', 'Z') +} + +type convFunc func(uint32) byte + +var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter} + +// tools for generating a random ECS instance password +// from 8 to 30 char MUST contain digit upper, case letter and upper case letter +// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance +func GenerateRandomECSPassword() string { + + // [8, 30] + l := int(randUint32()%23 + 8) + + n := randUint32Slice(l) + + b := make([]byte, l) + + b[0] = toDigit(n[0]) + b[1] = toLowerLetter(n[1]) + b[2] = toUpperLetter(n[2]) + + for i := 3; i < l; i++ { + b[i] = convFuncs[n[i]%3](n[i]) + } + + s := make([]byte, l) + perm := rand.Perm(l) + for i, v := range perm { + s[v] = b[i] + } + + return string(s) + +} + + +func LoopCall(attempts AttemptStrategy,api func() (bool,interface{},error))(interface{}, error){ + + for attempt := attempts.Start(); attempt.Next(); { + needStop,status,err := api() + + if(err != nil) { + return nil, errors.New("execution failed") + } + if(needStop){ + return status,nil; + } + + if attempt.HasNext() { + continue; + } + return nil,errors.New("timeout execution ") + } + panic("unreachable") +} diff --git 
a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go new file mode 100644 index 000000000..354b95f10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go @@ -0,0 +1,98 @@ +package util + +import ( + "testing" + "errors" + "time" +) + +func TestGenerateRandomECSPassword(t *testing.T) { + for i := 0; i < 10; i++ { + s := GenerateRandomECSPassword() + + if len(s) < 8 || len(s) > 30 { + t.Errorf("Generated ECS password [%v]: bad len", s) + } + + hasDigit := false + hasLower := false + hasUpper := false + + for j := range s { + + switch { + case '0' <= s[j] && s[j] <= '9': + hasDigit = true + case 'a' <= s[j] && s[j] <= 'z': + hasLower = true + case 'A' <= s[j] && s[j] <= 'Z': + hasUpper = true + } + } + + if !hasDigit { + t.Errorf("Generated ECS password [%v]: no digit", s) + } + + if !hasLower { + t.Errorf("Generated ECS password [%v]: no lower letter ", s) + } + + if !hasUpper { + t.Errorf("Generated ECS password [%v]: no upper letter", s) + } + } +} + +func TestWaitForSignalWithTimeout(t *testing.T) { + + attempts := AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, + } + + timeoutFunc := func() (bool,interface{},error) { + return false,"-1",nil + } + + begin := time.Now() + + _, timeoutError := LoopCall(attempts, timeoutFunc); + if(timeoutError != nil) { + t.Logf("timeout func complete successful") + } else { + t.Error("Expect timeout result") + } + + end := time.Now() + duration := end.Sub(begin).Seconds() + if( duration > (float64(attempts.Min) -1)) { + t.Logf("timeout func duration is enough") + } else { + t.Error("timeout func duration is not enough") + } + + errorFunc := func() (bool, interface{}, error) { + err := errors.New("execution failed"); + return false,"-1",err + } + + _, failedError := LoopCall(attempts, errorFunc); + if(failedError != nil) { + t.Logf("error func complete successful: " 
+ failedError.Error()) + } else { + t.Error("Expect error result") + } + + successFunc := func() (bool,interface{}, error) { + return true,nil,nil + } + + _, successError := LoopCall(attempts, successFunc); + if(successError != nil) { + t.Error("Expect success result") + } else { + t.Logf("success func complete successful") + } +} diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go old mode 100755 new mode 100644 index 9c52d5773..91ab4b1aa --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -56,6 +56,7 @@ type DriverParameters struct { Secure bool ChunkSize int64 RootDirectory string + Endpoint string } func init() { @@ -175,6 +176,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { rootDirectory = "" } + endpoint, ok := parameters["endpoint"] + if !ok { + endpoint = "" + } + params := DriverParameters{ AccessKeyID: fmt.Sprint(accessKey), AccessKeySecret: fmt.Sprint(secretKey), @@ -185,6 +191,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { Encrypt: encryptBool, Secure: secureBool, Internal: internalBool, + Endpoint: fmt.Sprint(endpoint), } return New(params) @@ -195,6 +202,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) + client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) // Validate that the given credentials have at least read permissions in the diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go old mode 100755 new mode 100644 index ecfe36e9d..2b469f34f --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -27,6 +27,7 @@ func init() { internal := os.Getenv("OSS_INTERNAL") encrypt := os.Getenv("OSS_ENCRYPT") secure := 
os.Getenv("OSS_SECURE") + endpoint := os.Getenv("OSS_ENDPOINT") root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) @@ -59,15 +60,16 @@ func init() { } parameters := DriverParameters{ - accessKey, - secretKey, - bucket, - alioss.Region(region), - internalBool, - encryptBool, - secureBool, - minChunkSize, - rootDirectory, + AccessKeyID: accessKey, + AccessKeySecret: secretKey, + Bucket: bucket, + Region: alioss.Region(region), + Internal: internalBool, + ChunkSize: minChunkSize, + RootDirectory: rootDirectory, + Encrypt: encryptBool, + Secure: secureBool, + Endpoint: endpoint, } return New(parameters) From ee9971a5afcf257bc3d18b17c06b5cca02aabe0d Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 16 Jun 2015 11:52:25 +0800 Subject: [PATCH 06/20] update godep --- Godeps/Godeps.json | 4 ++-- .../src/github.com/denverdino/aliyungo/oss/client.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 6c03f5446..dbc22b69e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -46,11 +46,11 @@ }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "56047189188d6558b6a1456d647970032343b511" + "Rev": "cab34948f93584226cdaac45adb09f3bb3725875" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "56047189188d6558b6a1456d647970032343b511" + "Rev": "cab34948f93584226cdaac45adb09f3bb3725875" }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go index d67c4d377..17b2d3ce7 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/client.go @@ -975,7 +975,7 @@ func (client *Client) setBaseURL(req *request) error { if client.endpoint == "" { req.baseurl = client.Region.GetEndpoint(client.Internal, req.bucket, client.Secure) } else { - req.baseurl 
= fmt.Sprintf("%s://%s", client.endpoint, getProtocol(client.Secure)) + req.baseurl = fmt.Sprintf("%s://%s", getProtocol(client.Secure), client.endpoint) } return nil From 3a240de22efb5607387b8b80c6d3b2ce452e54ac Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 16 Jun 2015 14:06:04 +0800 Subject: [PATCH 07/20] check access key and secret before run --- registry/storage/driver/oss/oss.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 91ab4b1aa..21a7e32ab 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -104,11 +104,11 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) { // be summoned when GetAuth is called) accessKey, ok := parameters["accesskeyid"] if !ok { - accessKey = "" + return nil, fmt.Errorf("No accesskeyid parameter provided") } secretKey, ok := parameters["accesskeysecret"] if !ok { - secretKey = "" + return nil, fmt.Errorf("No accesskeysecret parameter provided") } regionName, ok := parameters["region"] From 813543d6e330308058afc11df40acc8ebaae1665 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Thu, 2 Jul 2015 22:42:12 +0800 Subject: [PATCH 08/20] Update the OSS test case for latest code change Signed-off-by: Li Yi --- registry/storage/driver/oss/oss_test.go | 89 +++++++++++-------------- 1 file changed, 38 insertions(+), 51 deletions(-) diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go index 2b469f34f..2749a3d01 100644 --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -17,7 +17,9 @@ import ( // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } -type OSSDriverConstructor func(rootDirectory string) (*Driver, error) +var ossDriverConstructor func(rootDirectory string) (*Driver, error) + +var skipCheck func() string func init() { accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") @@ -34,7 +36,7 @@ func init() { } defer os.Remove(root) - ossDriverConstructor := func(rootDirectory string) (*Driver, error) { + ossDriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) @@ -76,79 +78,64 @@ func init() { } // Skip OSS storage driver tests if environment variable parameters are not provided - skipCheck := func() string { + skipCheck = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" } return "" } - driverConstructor := func() (storagedriver.StorageDriver, error) { + testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return ossDriverConstructor(root) + }, skipCheck) +} + +func TestEmptyRootList(t *testing.T) { + if skipCheck() != "" { + t.Skip(skipCheck()) } - testsuites.RegisterInProcessSuite(driverConstructor, skipCheck) - - // ossConstructor := func() (*Driver, error) { - // return ossDriverConstructor(aws.GetRegion(region)) - // } - - RegisterOSSDriverSuite(ossDriverConstructor, skipCheck) - - // testsuites.RegisterIPCSuite(driverName, map[string]string{ - // "accesskey": accessKey, - // "secretkey": secretKey, - // "region": region.Name, - // "bucket": bucket, - // "encrypt": encrypt, - // }, skipCheck) - // } -} - -func RegisterOSSDriverSuite(ossDriverConstructor OSSDriverConstructor, skipCheck testsuites.SkipCheck) { - check.Suite(&OSSDriverSuite{ - Constructor: ossDriverConstructor, - SkipCheck: skipCheck, - }) -} - -type OSSDriverSuite struct { - Constructor 
OSSDriverConstructor - testsuites.SkipCheck -} - -func (suite *OSSDriverSuite) SetUpSuite(c *check.C) { - if reason := suite.SkipCheck(); reason != "" { - c.Skip(reason) - } -} - -func (suite *OSSDriverSuite) TestEmptyRootList(c *check.C) { validRoot, err := ioutil.TempDir("", "driver-") - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating temporary directory: %v", err) + } defer os.Remove(validRoot) - rootedDriver, err := suite.Constructor(validRoot) - c.Assert(err, check.IsNil) - emptyRootDriver, err := suite.Constructor("") - c.Assert(err, check.IsNil) - slashRootDriver, err := suite.Constructor("/") - c.Assert(err, check.IsNil) + rootedDriver, err := ossDriverConstructor(validRoot) + if err != nil { + t.Fatalf("unexpected error creating rooted driver: %v", err) + } + + emptyRootDriver, err := ossDriverConstructor("") + if err != nil { + t.Fatalf("unexpected error creating empty root driver: %v", err) + } + + slashRootDriver, err := ossDriverConstructor("/") + if err != nil { + t.Fatalf("unexpected error creating slash root driver: %v", err) + } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) - c.Assert(err, check.IsNil) + if err != nil { + t.Fatalf("unexpected error creating content: %v", err) + } defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { - c.Assert(storagedriver.PathRegexp.MatchString(path), check.Equals, true) + if !storagedriver.PathRegexp.MatchString(path) { + t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) + } } } From 9627c6c5a5a7538b617564e153b84d79613f0bb3 
Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 3 Jul 2015 11:31:25 +0800 Subject: [PATCH 09/20] godep update oss sdk Signed-off-by: tgic --- Godeps/Godeps.json | 4 +- .../denverdino/aliyungo/util/util.go | 27 --------- .../denverdino/aliyungo/util/util_test.go | 55 ------------------- 3 files changed, 2 insertions(+), 84 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index dbc22b69e..3dbf5326f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -46,11 +46,11 @@ }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "cab34948f93584226cdaac45adb09f3bb3725875" + "Rev": "641bb4f0ddb552750324cb54c0140ca08865abd0" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "cab34948f93584226cdaac45adb09f3bb3725875" + "Rev": "641bb4f0ddb552750324cb54c0140ca08865abd0" }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go index 3e21abfcb..daa6bb02e 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util.go @@ -10,12 +10,6 @@ import ( "sort" "strconv" "time" - "errors" -) - - -const ( - StatusUnKnown = "NA" ) //CreateRandomString create random string @@ -138,24 +132,3 @@ func GenerateRandomECSPassword() string { return string(s) } - - -func LoopCall(attempts AttemptStrategy,api func() (bool,interface{},error))(interface{}, error){ - - for attempt := attempts.Start(); attempt.Next(); { - needStop,status,err := api() - - if(err != nil) { - return nil, errors.New("execution failed") - } - if(needStop){ - return status,nil; - } - - if attempt.HasNext() { - continue; - } - return nil,errors.New("timeout execution ") - } - panic("unreachable") -} diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go 
b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go index 354b95f10..87d2a0b83 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/util/util_test.go @@ -2,8 +2,6 @@ package util import ( "testing" - "errors" - "time" ) func TestGenerateRandomECSPassword(t *testing.T) { @@ -43,56 +41,3 @@ func TestGenerateRandomECSPassword(t *testing.T) { } } } - -func TestWaitForSignalWithTimeout(t *testing.T) { - - attempts := AttemptStrategy{ - Min: 5, - Total: 5 * time.Second, - Delay: 200 * time.Millisecond, - } - - timeoutFunc := func() (bool,interface{},error) { - return false,"-1",nil - } - - begin := time.Now() - - _, timeoutError := LoopCall(attempts, timeoutFunc); - if(timeoutError != nil) { - t.Logf("timeout func complete successful") - } else { - t.Error("Expect timeout result") - } - - end := time.Now() - duration := end.Sub(begin).Seconds() - if( duration > (float64(attempts.Min) -1)) { - t.Logf("timeout func duration is enough") - } else { - t.Error("timeout func duration is not enough") - } - - errorFunc := func() (bool, interface{}, error) { - err := errors.New("execution failed"); - return false,"-1",err - } - - _, failedError := LoopCall(attempts, errorFunc); - if(failedError != nil) { - t.Logf("error func complete successful: " + failedError.Error()) - } else { - t.Error("Expect error result") - } - - successFunc := func() (bool,interface{}, error) { - return true,nil,nil - } - - _, successError := LoopCall(attempts, successFunc); - if(successError != nil) { - t.Error("Expect success result") - } else { - t.Logf("success func complete successful") - } -} From 2a8535626f0845dbfc0ce424327c054e39549fe7 Mon Sep 17 00:00:00 2001 From: tgic Date: Sat, 4 Jul 2015 23:53:00 +0800 Subject: [PATCH 10/20] fix oss: got 403 in TestContinueStreamAppendLarge Signed-off-by: tgic --- Godeps/Godeps.json | 4 ++-- .../src/github.com/denverdino/aliyungo/oss/multi.go | 8 
++++++-- registry/storage/driver/oss/oss.go | 4 ++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 3dbf5326f..9fb51c3e3 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -46,11 +46,11 @@ }, { "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "641bb4f0ddb552750324cb54c0140ca08865abd0" + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" }, { "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "641bb4f0ddb552750324cb54c0140ca08865abd0" + "Rev": "0e0f322d0a54b994dea9d32541050d177edf6aa3" }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", diff --git a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go index 454d53b70..5b6491ecc 100644 --- a/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go +++ b/Godeps/_workspace/src/github.com/denverdino/aliyungo/oss/multi.go @@ -139,6 +139,9 @@ func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Option } func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { + // TODO source format a /BUCKET/PATH/TO/OBJECT + // TODO not a good design. 
API could be changed to PutPartCopyWithinBucket(..., path) and PutPartCopyFromBucket(bucket, path) + headers := make(http.Header) headers.Set("x-oss-copy-source", source) @@ -150,8 +153,9 @@ func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObj sourceBucket := m.Bucket.Client.Bucket(strings.TrimRight(strings.Split(source, "/")[1], "/")) //log.Println("source: ", source) //log.Println("sourceBucket: ", sourceBucket.Name) - //log.Println("HEAD: ", strings.Split(source, "/")[2]) - sourceMeta, err := sourceBucket.Head(strings.Split(source, "/")[2], nil) + //log.Println("HEAD: ", strings.strings.SplitAfterN(source, "/", 3)[2]) + // TODO SplitAfterN can be use in bucket name + sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 3)[2], nil) if err != nil { return nil, Part{}, err } diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 21a7e32ab..b3ab11c98 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -492,7 +492,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // currentLength >= offset >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, - d.Bucket.Name+"/"+d.ossPath(path)) + d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } @@ -586,7 +586,7 @@ func (d *driver) WriteStream(ctx context.Context, path string, offset int64, rea // offset > currentLength >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{}, - d.Bucket.Name+"/"+d.ossPath(path)) + d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } From 8e5c901a26938338806ebe6366394ec76ff1bfcd Mon Sep 17 00:00:00 2001 From: tgic Date: Sun, 5 Jul 2015 01:14:24 +0800 Subject: [PATCH 11/20] fix testcase TestReadStreamWithOffset incompatible with oss Signed-off-by: tgic --- registry/storage/driver/oss/oss.go | 14 ++++++++++---- 1 file 
changed, 10 insertions(+), 4 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index b3ab11c98..d12f6ed29 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -276,12 +276,18 @@ func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io. resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) if err != nil { - if ossErr, ok := err.(*oss.Error); ok && ossErr.Code == "InvalidRange" { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - return nil, parseError(path, err) } + + // Due to Aliyun OSS API, status 200 and the whole object will be returned instead of an + // InvalidRange error when range is invalid. + // + // OSS server will always return http.StatusPartialContent if range is acceptable. + if resp.StatusCode != http.StatusPartialContent { + resp.Body.Close() + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + return resp.Body, nil } From 76174ad57ed099431ff9ae03068ee98616cc2d5d Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 24 Jul 2015 10:20:50 +0800 Subject: [PATCH 12/20] update doc add endpoint --- docs/storage-drivers/oss.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md index 997490156..449546ed2 100755 --- a/docs/storage-drivers/oss.md +++ b/docs/storage-drivers/oss.md @@ -10,20 +10,22 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Aliy ## Parameters -`accesskeyid`: Your access key ID. +* `accesskeyid`: Your access key ID. -`accesskeysecret`: Your access key secret. +* `accesskeysecret`: Your access key secret. -`region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). 
For a list of regions, you can look at [http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region](http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region) +* `region`: The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, you can look at -`internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at [http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region](http://docs.aliyun.com/?spm=5176.383663.9.2.0JzTP8#/oss/product-documentation/domain-region) +* `endpoint`: (optional) By default, the endpoint should be `..aliyuncs.com` or `.-internal.aliyuncs.com` (when internal=true). You can change the default endpoint by changing this value. -`bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). +* `internal`: (optional) Using internal endpoint or the public endpoint for OSS access. The default is false. For a list of regions, you can look at -`encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). +* `bucket`: The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). -`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to false if not specified. +* `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). -`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. 
+* `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to false if not specified. -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). +* `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. + +* `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). From 539c7f53113144566188c59fa4566b19dd49243d Mon Sep 17 00:00:00 2001 From: Li Yi Date: Sun, 26 Jul 2015 07:35:20 +0800 Subject: [PATCH 13/20] Update the comments for consistence model Change-Id: I161522ee51f247fb17e42844b3699bd9031e34e8 Signed-off-by: Li Yi --- registry/storage/driver/oss/oss.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index d12f6ed29..4e514f378 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -8,8 +8,7 @@ // time for directories (directories are an abstraction for key, value stores) // // Keep in mind that OSS guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written (although -// in most regions a new object put has guaranteed read after write). The only true +// that a successful write will mean immediate access to the data written. The only true // guarantee is that once you call Stat and receive a certain file size, that much of // the file is already accessible. 
package oss From 708ad2811408443c0abc6310b20ef5a236e72d86 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Sun, 26 Jul 2015 10:01:45 +0800 Subject: [PATCH 14/20] Update the comment for the consistency model Change-Id: Iee49afeda1c11d6af8c0f26c96d8ccc328c22757 Signed-off-by: Li Yi --- registry/storage/driver/oss/oss.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 4e514f378..2303ebd0d 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -7,10 +7,6 @@ // Because OSS is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // -// Keep in mind that OSS guarantees only eventual consistency, so do not assume -// that a successful write will mean immediate access to the data written. The only true -// guarantee is that once you call Stat and receive a certain file size, that much of -// the file is already accessible. package oss import ( From 0058b08eeb8c603c8c416588427553bcdef2d915 Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 28 Jul 2015 12:45:05 +0800 Subject: [PATCH 15/20] add include_oss build tag Signed-off-by: tgic --- registry/storage/driver/oss/doc.go | 3 +++ registry/storage/driver/oss/oss.go | 2 ++ registry/storage/driver/oss/oss_test.go | 2 ++ 3 files changed, 7 insertions(+) create mode 100644 registry/storage/driver/oss/doc.go diff --git a/registry/storage/driver/oss/doc.go b/registry/storage/driver/oss/doc.go new file mode 100644 index 000000000..d1bc932f8 --- /dev/null +++ b/registry/storage/driver/oss/doc.go @@ -0,0 +1,3 @@ +// Package oss implements the Aliyun OSS Storage driver backend. Support can be +// enabled by including the "include_oss" build tag. 
+package oss diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 2303ebd0d..cbda6d166 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -7,6 +7,8 @@ // Because OSS is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // +// +build include_oss + package oss import ( diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go index 2749a3d01..56ec32085 100644 --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -1,3 +1,5 @@ +// +build include_oss + package oss import ( From 806b890cd643108bfe314e31ecdd999906afb500 Mon Sep 17 00:00:00 2001 From: tgic Date: Tue, 28 Jul 2015 15:32:45 +0800 Subject: [PATCH 16/20] add build tag include_oss to circle.yml --- circle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index e43ecb66d..21191f469 100644 --- a/circle.yml +++ b/circle.yml @@ -21,7 +21,7 @@ machine: BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_rados" + DOCKER_BUILDTAGS: "include_rados include_oss" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" # Ceph config From e53d99a83712413cdc959c17df0bc786e740693b Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 31 Jul 2015 12:39:55 +0800 Subject: [PATCH 17/20] fix goimports Signed-off-by: tgic --- registry/storage/driver/oss/oss.go | 3 ++- registry/storage/driver/oss/oss_test.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index cbda6d166..108ad475d 100644 --- a/registry/storage/driver/oss/oss.go +++ 
b/registry/storage/driver/oss/oss.go @@ -14,7 +14,6 @@ package oss import ( "bytes" "fmt" - "github.com/docker/distribution/context" "io" "io/ioutil" "net/http" @@ -24,6 +23,8 @@ import ( "sync" "time" + "github.com/docker/distribution/context" + "github.com/Sirupsen/logrus" "github.com/denverdino/aliyungo/oss" storagedriver "github.com/docker/distribution/registry/storage/driver" diff --git a/registry/storage/driver/oss/oss_test.go b/registry/storage/driver/oss/oss_test.go index 56ec32085..fbae5d9ca 100644 --- a/registry/storage/driver/oss/oss_test.go +++ b/registry/storage/driver/oss/oss_test.go @@ -3,11 +3,12 @@ package oss import ( + "io/ioutil" + alioss "github.com/denverdino/aliyungo/oss" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" - "io/ioutil" //"log" "os" "strconv" From 3eaab7da9530f2e86881ddbcc2e7d8685744bc8d Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 31 Jul 2015 12:43:04 +0800 Subject: [PATCH 18/20] add link to Aliyun OSS doc Signed-off-by: tgic --- docs/storagedrivers.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/storagedrivers.md b/docs/storagedrivers.md index 519961e18..b014049c4 100644 --- a/docs/storagedrivers.md +++ b/docs/storagedrivers.md @@ -24,6 +24,7 @@ This storage driver package comes bundled with several drivers: - [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). - [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. - [swift](storage-drivers/swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/). +- [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). 
## Storage Driver API From 72c794a8102bf3b45a03a97adf15eb83c98cb327 Mon Sep 17 00:00:00 2001 From: tgic Date: Fri, 31 Jul 2015 12:46:54 +0800 Subject: [PATCH 19/20] remove unused code and fix todo format Signed-off-by: tgic --- registry/storage/driver/oss/oss.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/registry/storage/driver/oss/oss.go b/registry/storage/driver/oss/oss.go index 108ad475d..cec320262 100644 --- a/registry/storage/driver/oss/oss.go +++ b/registry/storage/driver/oss/oss.go @@ -209,20 +209,8 @@ func New(params DriverParameters) (*Driver, error) { return nil, err } - // TODO Currently multipart uploads have no timestamps, so this would be unwise + // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new OSS client while another one is running on the same bucket. - // multis, _, err := bucket.ListMulti("", "") - // if err != nil { - // return nil, err - // } - - // for _, multi := range multis { - // err := multi.Abort() - // //TODO appropriate to do this error checking? - // if err != nil { - // return nil, err - // } - // } d := &driver{ Client: client, From d7d34a6c5bcac0c152bbdf81a97057b19256e026 Mon Sep 17 00:00:00 2001 From: Li Yi Date: Tue, 4 Aug 2015 09:18:18 +0800 Subject: [PATCH 20/20] Add the OSS link Change-Id: I940fb50f467ce6fc5fbdd9ceb0f9d848e422ced7 Signed-off-by: Li Yi --- docs/storage-drivers/oss.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage-drivers/oss.md b/docs/storage-drivers/oss.md index 449546ed2..748a31da3 100755 --- a/docs/storage-drivers/oss.md +++ b/docs/storage-drivers/oss.md @@ -6,7 +6,7 @@ IGNORES--> # Aliyun OSS storage driver -An implementation of the `storagedriver.StorageDriver` interface which uses Aliyun OSS for object storage. +An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage. ## Parameters