vendor: update all dependencies to latest versions

This commit is contained in:
Nick Craig-Wood 2017-09-30 15:27:27 +01:00
parent 911d121bb9
commit b017fcfe9a
3048 changed files with 537057 additions and 189681 deletions

View file

@ -5,14 +5,11 @@ services:
language: go
go:
- 1.6
- 1.7
env:
matrix:
- GO_VERSION=1.8
- GO_VERSION=1.7
- GO_VERSION=1.6
- GO_VERSION=1.5
- 1.8
- 1.9
- master
cache:
directories:
@ -32,14 +29,14 @@ before_install:
mv linux-amd64 glide-v0.12.3;
fi
- pushd glide-v0.12.3 && sudo cp glide /usr/local/bin && popd
- if [[ ! -d "./snips-v0.1.0" ]]; then
wget https://github.com/yunify/snips/releases/download/v0.1.0/snips-v0.1.0-linux_amd64.tar.gz &&
mkdir snips-v0.1.0 &&
pushd snips-v0.1.0 &&
tar -vxzf ../snips-v0.1.0-linux_amd64.tar.gz &&
- if [[ ! -d "./snips-v0.2.8" ]]; then
wget https://github.com/yunify/snips/releases/download/v0.2.8/snips-v0.2.8-linux_amd64.tar.gz &&
mkdir snips-v0.2.8 &&
pushd snips-v0.2.8 &&
tar -vxzf ../snips-v0.2.8-linux_amd64.tar.gz &&
popd;
fi
- pushd snips-v0.1.0 && sudo cp snips /usr/local/bin && popd
- pushd snips-v0.2.8 && sudo cp snips /usr/local/bin && popd
- popd
- /usr/local/bin/make --version
- /usr/local/bin/glide --version
@ -47,6 +44,7 @@ before_install:
install:
- go get -u github.com/golang/lint/golint;
- glide install
- git submodule init
before_script:
- /usr/local/bin/make update
@ -55,4 +53,4 @@ before_script:
script:
- /usr/local/bin/make check
- /usr/local/bin/make release
- /usr/local/bin/make test-runtime-go-${GO_VERSION}
- /usr/local/bin/make build test test-coverage

View file

@ -1,6 +1,27 @@
# Change Log
All notable changes to QingStor SDK for Go will be documented in this file.
## [v2.2.8] - 2017-09-25
### Added
- Support setting custom SDK logger
## [v2.2.7] - 2017-09-01
### Added
- Support image process APIs
- Add advanced client for image process
### Changed
- Force the zone ID to be lowercase
### Fixed
- Add support for the X-QS-Date header
## [v2.2.6] - 2017-07-21
### Fixed
@ -100,6 +121,8 @@ All notable changes to QingStor SDK for Go will be documented in this file.
- QingStor SDK for the Go programming language.
[v2.2.8]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.7...v2.2.8
[v2.2.7]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.6...v2.2.7
[v2.2.6]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.5...v2.2.6
[v2.2.5]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.4...v2.2.5
[v2.2.4]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.3...v2.2.4

View file

@ -19,7 +19,6 @@ help:
@echo " test to run test"
@echo " test-coverage to run test with coverage"
@echo " test-race to run test with race"
@echo " test-runtime to run test in Go 1.8/1.7/1.6/1.5 in docker"
@echo " integration-test to run integration test"
@echo " release to build and release current version"
@echo " release-source to pack the source code"
@ -55,18 +54,14 @@ generate:
@if [[ ! -f "$$(which snips)" ]]; then \
echo "ERROR: Command \"snips\" not found."; \
fi
snips \
--service=qingstor --service-api-version=latest \
--spec="./specs" --template="./template" --output="./service"
snips -f="./specs/qingstor/2016-01-06/swagger/api_v2.0.json" -t="./template" -o="./service"
gofmt -w .
@echo "ok"
.PHONY: build
build:
@echo "build the SDK"
GOOS=linux GOARCH=amd64 go build ${PKGS_TO_RELEASE}
GOOS=darwin GOARCH=amd64 go build ${PKGS_TO_RELEASE}
GOOS=windows GOARCH=amd64 go build ${PKGS_TO_RELEASE}
go build ${PKGS_TO_RELEASE}
@echo "ok"
.PHONY: test
@ -94,90 +89,6 @@ test-race:
go test -v -race -cpu=1,2,4 ${PKGS_TO_RELEASE}
@echo "ok"
.PHONY: test-runtime
test-runtime: test-runtime-go-1.8 test-runtime-go-1.7 test-runtime-go-1.6 test-runtime-go-1.5
export define DOCKERFILE_GO_1_8
FROM golang:1.8
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.8
test-runtime-go-1.8:
@echo "run test in go 1.8"
echo "$${DOCKERFILE_GO_1_8}" > "dockerfile_go_1.8"
docker build -f "./dockerfile_go_1.8" -t "${PREFIX}:go-1.8" .
rm -f "./dockerfile_go_1.8"
docker run --name "${PREFIX}-go-1.8-unit" -t "${PREFIX}:go-1.8"
docker rm "${PREFIX}-go-1.8-unit"
docker rmi "${PREFIX}:go-1.8"
@echo "ok"
export define DOCKERFILE_GO_1_7
FROM golang:1.7
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.7
test-runtime-go-1.7:
@echo "run test in go 1.7"
echo "$${DOCKERFILE_GO_1_7}" > "dockerfile_go_1.7"
docker build -f "./dockerfile_go_1.7" -t "${PREFIX}:go-1.7" .
rm -f "./dockerfile_go_1.7"
docker run --name "${PREFIX}-go-1.7-unit" -t "${PREFIX}:go-1.7"
docker rm "${PREFIX}-go-1.7-unit"
docker rmi "${PREFIX}:go-1.7"
@echo "ok"
export define DOCKERFILE_GO_1_6
FROM golang:1.6
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.6
test-runtime-go-1.6:
@echo "run test in go 1.6"
echo "$${DOCKERFILE_GO_1_6}" > "dockerfile_go_1.6"
docker build -f "./dockerfile_go_1.6" -t "${PREFIX}:go-1.6" .
rm -f "./dockerfile_go_1.6"
docker run --name "${PREFIX}-go-1.6-unit" -t "${PREFIX}:go-1.6"
docker rm "${PREFIX}-go-1.6-unit"
docker rmi "${PREFIX}:go-1.6"
@echo "ok"
export define DOCKERFILE_GO_1_5
FROM golang:1.5
ENV GO15VENDOREXPERIMENT="1"
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.5
test-runtime-go-1.5:
@echo "run test in go 1.5"
echo "$${DOCKERFILE_GO_1_5}" > "dockerfile_go_1.5"
docker build -f "dockerfile_go_1.5" -t "${PREFIX}:go-1.5" .
rm -f "dockerfile_go_1.5"
docker run --name "${PREFIX}-go-1.5-unit" -t "${PREFIX}:go-1.5"
docker rm "${PREFIX}-go-1.5-unit"
docker rmi "${PREFIX}:go-1.5"
@echo "ok"
.PHONY: integration-test
integration-test:
@echo "run integration test"

View file

@ -0,0 +1,106 @@
package upload
import (
"bytes"
"io"
"errors"
)
const (
// QingStor has a max upload parts limit to 10000.
maxUploadParts = 10000
// segmentSize is the buffer size in bytes used when draining a
// non-seekable stream one Read call at a time.
segmentSize = 1024
)
// chunk reads a data stream piecewise, yielding one upload part at a time.
type chunk struct {
// fd is the underlying data source; it may or may not be seekable.
fd io.Reader
// cur is the current read offset (used for seekable sources only).
cur int64
// size is the total stream size in bytes, or -1 when unknown.
size int64
// partSize is the maximum number of bytes per part.
partSize int
}
// newChunk builds a chunk reader over fd that yields parts of at most
// partSize bytes, probing the total stream size up front.
func newChunk(fd io.Reader, partSize int) *chunk {
	c := &chunk{fd: fd, partSize: partSize}
	c.initSize()
	return c
}
// nextPart returns an io.ReadSeeker over the next part of the stream,
// or io.EOF once the stream is exhausted.
//
// Seekable sources are wrapped in a zero-copy section reader; plain
// io.Readers are buffered into memory one part at a time.
func (f *chunk) nextPart() (io.ReadSeeker, error) {
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
	}
	switch r := f.fd.(type) {
	case readerAtSeeker:
		var sectionSize int64
		var err error
		leftSize := f.size - f.cur
		if leftSize >= int64(f.partSize) {
			sectionSize = int64(f.partSize)
		} else if leftSize > 0 {
			sectionSize = leftSize
		} else {
			err = io.EOF
		}
		seekReader := io.NewSectionReader(r, f.cur, sectionSize)
		f.cur += sectionSize
		return seekReader, err
	case io.Reader:
		part := make([]byte, 0, f.partSize)
		buf := make([]byte, segmentSize)
		var err error
		for len(part) < f.partSize {
			// Never read past the part boundary, so the next call
			// starts exactly where this part ends (the original could
			// overshoot when partSize was not a multiple of segmentSize).
			want := f.partSize - len(part)
			if want > segmentSize {
				want = segmentSize
			}
			n, _ := r.Read(buf[:want])
			if n == 0 {
				if len(part) == 0 {
					err = io.EOF
				}
				break
			}
			// BUG FIX: append only the n bytes actually read; the
			// original appended the whole buffer, corrupting the part
			// contents whenever a Read returned fewer than segmentSize
			// bytes mid-stream.
			part = append(part, buf[:n]...)
		}
		return bytes.NewReader(part), err
	default:
		return nil, errors.New("file does not support read")
	}
}
// initSize tries to detect the total stream size, setting f.size. If
// the size is not known (the reader is not seekable, or seeking fails),
// f.size is left at -1.
func (f *chunk) initSize() {
	f.size = -1
	switch r := f.fd.(type) {
	case io.Seeker:
		// Remember the current offset; restore it once the size probe
		// is done. The original ignored this error and used the magic
		// whence values 1/2/0 instead of the io.Seek* constants.
		pos, err := r.Seek(0, io.SeekCurrent)
		if err != nil {
			return
		}
		defer r.Seek(pos, io.SeekStart)
		n, err := r.Seek(0, io.SeekEnd)
		if err != nil {
			return
		}
		f.size = n
		// Try to adjust partSize if it is too small and account for
		// integer division truncation.
		if f.size/int64(f.partSize) >= int64(maxUploadParts) {
			// Add one to the part size to account for remainders
			// during the size calculation, e.g. an odd number of bytes.
			f.partSize = int(f.size/int64(maxUploadParts)) + 1
		}
	}
}

View file

@ -0,0 +1,52 @@
package upload
import (
"fmt"
"os"
"os/exec"
"testing"
)
var partSize = 5 * 1024
// Test_newFileChunk verifies that newChunk detects the size of the
// 512000-byte fixture file created by setup.
func Test_newFileChunk(t *testing.T) {
	setup()
	fd, _ := os.Open("test_file")
	defer fd.Close()
	chunker := newChunk(fd, partSize)
	if got := chunker.size; got != 512000 {
		t.Fatalf("expected 512000, got %d", got)
	}
	tearDown()
}
// Test_nextPart verifies that nextPart returns exactly one part of
// partSize bytes from the fixture file.
func Test_nextPart(t *testing.T) {
	setup()
	defer tearDown()
	fd, _ := os.Open("test_file")
	defer fd.Close()
	fr := newChunk(fd, partSize)
	partBody, err := fr.nextPart()
	if err != nil {
		// BUG FIX: an unexpected error now fails the test instead of
		// being printed and ignored.
		t.Fatal(fmt.Sprint("nextPart failed: ", err))
	}
	temp := make([]byte, 6000)
	n, _ := partBody.Read(temp)
	if n != partSize {
		// BUG FIX: report the byte count actually read (n); the
		// original printed len(temp), which is always 6000.
		t.Fatalf("expected %d, got %d", partSize, n)
	}
}
// setup creates a 512000-byte zero-filled fixture file named
// "test_file" via dd. Errors are deliberately ignored: this is
// best-effort test scaffolding and the tests themselves will fail
// loudly if the file is missing.
func setup() {
exec.Command("dd", "if=/dev/zero", "of=test_file", "bs=1024", "count=500").Output()
}
func tearDown() {
exec.Command("rm", "", "test_file").Output()
}

View file

@ -0,0 +1,110 @@
package upload
import (
"errors"
"github.com/yunify/qingstor-sdk-go/logger"
"github.com/yunify/qingstor-sdk-go/service"
"io"
)
// Uploader performs multipart uploads of large objects to a QingStor
// bucket.
type Uploader struct {
// bucket is the destination bucket for all uploads.
bucket *service.Bucket
// partSize is the size in bytes of each uploaded part.
partSize int
}
// smallestPartSize is the minimum part size (4 MB) accepted by Upload.
const smallestPartSize int = 1024 * 1024 * 4
// Init creates an Uploader that writes to bucket in parts of partSize
// bytes. partSize must be at least smallestPartSize or Upload will
// refuse to run.
func Init(bucket *service.Bucket, partSize int) *Uploader {
return &Uploader{
bucket: bucket,
partSize: partSize,
}
}
// Upload uploads a large object in multiple parts read from fd and
// stores it under objectKey. It returns an error if the configured
// part size is below the 4 MB minimum or if any stage of the
// multipart upload (initiate, part upload, complete) fails.
func (u *Uploader) Upload(fd io.Reader, objectKey string) error {
	if u.partSize < smallestPartSize {
		logger.Errorf("Part size error")
		return errors.New("the part size is too small")
	}
	uploadID, err := u.init(objectKey)
	if err != nil {
		// BUG FIX: pass the error as a format argument instead of
		// concatenating it into the format string, where a literal "%"
		// in the error text would be misinterpreted as a printf verb.
		logger.Errorf("Init multipart upload error: %v", err)
		return err
	}
	partNumbers, err := u.upload(fd, uploadID, objectKey)
	if err != nil {
		logger.Errorf("Upload multipart error: %v", err)
		return err
	}
	if err := u.complete(objectKey, uploadID, partNumbers); err != nil {
		logger.Errorf("Complete upload error: %v", err)
		return err
	}
	return nil
}
// init starts a multipart upload for objectKey and returns the new
// upload ID assigned by QingStor.
func (u *Uploader) init(objectKey string) (*string, error) {
	input := &service.InitiateMultipartUploadInput{}
	output, err := u.bucket.InitiateMultipartUpload(objectKey, input)
	if err != nil {
		return nil, err
	}
	return output.UploadID, nil
}
// upload reads parts from fd via a chunk reader and uploads each one
// under the given uploadID, returning the list of uploaded part
// numbers (starting at 0) for the completion call.
func (u *Uploader) upload(fd io.Reader, uploadID *string, objectKey string) ([]*service.ObjectPartType, error) {
	partNumbers := []*service.ObjectPartType{}
	fileReader := newChunk(fd, u.partSize)
	for partCnt := 0; ; partCnt++ {
		partBody, err := fileReader.nextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			logger.Errorf("Get next part failed for %v", err)
			return nil, err
		}
		// BUG FIX: give each part its own copy of the counter. The
		// original passed &partCnt, a pointer to the loop variable,
		// which later iterations mutate if the SDK retains it.
		partNumber := partCnt
		_, err = u.bucket.UploadMultipart(
			objectKey,
			&service.UploadMultipartInput{
				UploadID:   uploadID,
				PartNumber: &partNumber,
				Body:       partBody,
			},
		)
		if err != nil {
			logger.Errorf("Upload multipart failed for %v", err)
			return nil, err
		}
		partNumbers = append(partNumbers, &service.ObjectPartType{
			// BUG FIX: was "partCnt - 0", a meaningless subtraction.
			PartNumber: service.Int(partNumber),
		})
	}
	return partNumbers, nil
}
// complete finishes the multipart upload identified by uploadID,
// assembling the previously uploaded parts into the final object.
func (u *Uploader) complete(objectKey string, uploadID *string, partNumbers []*service.ObjectPartType) error {
	input := &service.CompleteMultipartUploadInput{
		UploadID:    uploadID,
		ObjectParts: partNumbers,
	}
	_, err := u.bucket.CompleteMultipartUpload(objectKey, input)
	return err
}

View file

@ -113,7 +113,7 @@ func (c *Config) Check() error {
func (c *Config) LoadDefaultConfig() error {
_, err := yaml.Decode([]byte(DefaultConfigFileContent), c)
if err != nil {
logger.Error("Config parse error: " + err.Error())
logger.Errorf("Config parse error: " + err.Error())
return err
}
@ -127,7 +127,7 @@ func (c *Config) LoadDefaultConfig() error {
func (c *Config) LoadUserConfig() error {
_, err := os.Stat(GetUserConfigFilePath())
if err != nil {
logger.Warn("Installing default config file to \"" + GetUserConfigFilePath() + "\"")
logger.Warnf("Installing default config file to \"" + GetUserConfigFilePath() + "\"")
InstallDefaultUserConfig()
}
@ -143,7 +143,7 @@ func (c *Config) LoadConfigFromFilePath(filepath string) error {
yamlString, err := ioutil.ReadFile(filepath)
if err != nil {
logger.Error("File not found: " + filepath)
logger.Errorf("File not found: " + filepath)
return err
}
@ -157,7 +157,7 @@ func (c *Config) LoadConfigFromContent(content []byte) error {
_, err := yaml.Decode(content, c)
if err != nil {
logger.Error("Config parse error: " + err.Error())
logger.Errorf("Config parse error: " + err.Error())
return err
}

View file

@ -20,7 +20,6 @@ package logger
import (
"fmt"
"io"
"os"
"strings"
"time"
@ -28,7 +27,15 @@ import (
"github.com/sirupsen/logrus"
)
var instance *logrus.Logger
// Logger is the interface of the SDK logger. Any implementation with
// printf-style leveled methods (e.g. *logrus.Logger) can be installed
// via SetLogger to receive the SDK's log output.
type Logger interface {
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Warnf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
}
// LogFormatter is used to format log entry.
type LogFormatter struct{}
@ -48,12 +55,8 @@ func (c *LogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
time.Now().Format("2006-01-02T15:04:05.000Z"),
os.Getpid(),
level,
entry.Message)), nil
}
// SetOutput set the destination for the log output
func SetOutput(out io.Writer) {
instance.Out = out
entry.Message),
), nil
}
// CheckLevel checks whether the log level is valid.
@ -66,54 +69,60 @@ func CheckLevel(level string) error {
// GetLevel get the log level string.
func GetLevel() string {
return instance.Level.String()
if l, ok := instance.(*logrus.Logger); ok {
return l.Level.String()
}
return "unknown"
}
// SetLevel sets the log level. Valid levels are "debug", "info", "warn", "error", and "fatal".
func SetLevel(level string) {
lvl, err := logrus.ParseLevel(level)
if err != nil {
Fatal(fmt.Sprintf(`log level not valid: "%s"`, level))
}
instance.Level = lvl
}
// Debug logs a message with severity DEBUG.
func Debug(format string, v ...interface{}) {
output(instance.Debug, format, v...)
}
// Info logs a message with severity INFO.
func Info(format string, v ...interface{}) {
output(instance.Info, format, v...)
}
// Warn logs a message with severity WARN.
func Warn(format string, v ...interface{}) {
output(instance.Warn, format, v...)
}
// Error logs a message with severity ERROR.
func Error(format string, v ...interface{}) {
output(instance.Error, format, v...)
}
// Fatal logs a message with severity ERROR followed by a call to os.Exit().
func Fatal(format string, v ...interface{}) {
output(instance.Fatal, format, v...)
}
func output(origin func(...interface{}), format string, v ...interface{}) {
if len(v) > 0 {
origin(fmt.Sprintf(format, v...))
} else {
origin(format)
if l, ok := instance.(*logrus.Logger); ok {
lvl, err := logrus.ParseLevel(level)
if err != nil {
Fatalf(fmt.Sprintf(`log level not valid: "%s"`, level))
}
l.Level = lvl
}
}
// SetLogger installs the given logger as the SDK logger, replacing the
// default logrus-backed instance.
func SetLogger(l Logger) {
instance = l
}
// Debugf logs a message with severity DEBUG to the active SDK logger.
func Debugf(format string, v ...interface{}) {
instance.Debugf(format, v...)
}
// Infof logs a message with severity INFO to the active SDK logger.
func Infof(format string, v ...interface{}) {
instance.Infof(format, v...)
}
// Warnf logs a message with severity WARN to the active SDK logger.
func Warnf(format string, v ...interface{}) {
instance.Warnf(format, v...)
}
// Errorf logs a message with severity ERROR to the active SDK logger.
func Errorf(format string, v ...interface{}) {
instance.Errorf(format, v...)
}
// Fatalf logs a message with severity ERROR followed by a call to
// os.Exit() (behavior of the underlying logger's Fatalf).
func Fatalf(format string, v ...interface{}) {
instance.Fatalf(format, v...)
}
// instance is the active SDK logger; it defaults to a logrus logger
// and may be replaced at runtime via SetLogger.
var instance Logger
func init() {
// Default to a logrus logger that writes formatted entries to
// stderr at warn level.
l := logrus.New()
l.Formatter = &LogFormatter{}
l.Out = os.Stderr
l.Level = logrus.WarnLevel
instance = l
}

View file

@ -39,7 +39,7 @@ import (
type BaseBuilder struct {
parsedURL string
parsedProperties *map[string]string
parsedParams *map[string]string
parsedQuery *map[string]string
parsedHeaders *map[string]string
parsedBodyString string
parsedBody io.Reader
@ -76,7 +76,7 @@ func (b *BaseBuilder) build() (*http.Request, error) {
}
func (b *BaseBuilder) parse() (*BaseBuilder, error) {
err := b.parseRequestParamsAndHeaders()
err := b.parseRequestQueryAndHeaders()
if err != nil {
return b, err
}
@ -96,15 +96,15 @@ func (b *BaseBuilder) parse() (*BaseBuilder, error) {
return b, nil
}
func (b *BaseBuilder) parseRequestParamsAndHeaders() error {
requestParams := map[string]string{}
func (b *BaseBuilder) parseRequestQueryAndHeaders() error {
requestQuery := map[string]string{}
requestHeaders := map[string]string{}
maps := map[string](map[string]string){
"params": requestParams,
"query": requestQuery,
"headers": requestHeaders,
}
b.parsedParams = &requestParams
b.parsedQuery = &requestQuery
b.parsedHeaders = &requestHeaders
if !b.input.IsValid() {
@ -280,7 +280,7 @@ func (b *BaseBuilder) setupHeaders(httpRequest *http.Request) error {
body.Seek(0, 0)
length = end - start
default:
return errors.New("Can not get Content-Length")
return errors.New("can not get Content-Length")
}
if length > 0 {
httpRequest.ContentLength = length

View file

@ -33,9 +33,9 @@ type FakeProperties struct {
CD *int `name:"c-d"`
}
type FakeInput struct {
ParamA *string `location:"params" name:"a"`
ParamB *string `location:"params" name:"b"`
ParamCD *int `location:"params" name:"c_d" default:"1024"`
ParamA *string `location:"query" name:"a"`
ParamB *string `location:"query" name:"b"`
ParamCD *int `location:"query" name:"c_d" default:"1024"`
HeaderA *string `location:"headers" name:"A"`
HeaderB *time.Time `location:"headers" name:"B" format:"RFC 822"`
HeaderCD *int `location:"headers" name:"C-D"`
@ -109,7 +109,7 @@ func TestBaseBuilder_BuildHTTPRequest(t *testing.T) {
assert.Equal(t, &map[string]string{
"a": "param_a",
"c_d": "1024",
}, builder.parsedParams)
}, builder.parsedQuery)
assert.Equal(t, &map[string]string{
"A": "header_a",
"B": "Thu, 01 Sep 2016 07:30:00 GMT",

View file

@ -75,20 +75,20 @@ func (qb *QingStorBuilder) BuildHTTPRequest(o *data.Operation, i *reflect.Value)
return nil, err
}
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"Built QingStor request: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
httpRequest.URL.String()),
)
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor request headers: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
fmt.Sprint(httpRequest.Header)),
)
if qb.baseBuilder.parsedBodyString != "" {
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor request body string: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
qb.baseBuilder.parsedBodyString),
@ -120,9 +120,9 @@ func (qb *QingStorBuilder) parseURL() error {
return err
}
if qb.baseBuilder.parsedParams != nil {
if qb.baseBuilder.parsedQuery != nil {
queryValue := requestURL.Query()
for key, value := range *qb.baseBuilder.parsedParams {
for key, value := range *qb.baseBuilder.parsedQuery {
queryValue.Set(key, value)
}
requestURL.RawQuery = queryValue.Encode()

View file

@ -216,7 +216,7 @@ func (r *Request) send() error {
retries := r.Operation.Config.ConnectionRetries + 1
for {
if retries > 0 {
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"Sending request: [%d] %s %s",
convert.StringToUnixTimestamp(r.HTTPRequest.Header.Get("Date"), convert.RFC822),
r.Operation.RequestMethod,

View file

@ -41,7 +41,7 @@ type SomeActionInput struct {
Date *time.Time `json:"Date" name:"Date" format:"RFC 822" location:"headers"`
IfModifiedSince *time.Time `json:"If-Modified-Since" name:"If-Modified-Since" format:"RFC 822" location:"headers"`
Range *string `json:"Range" name:"Range" location:"headers"`
UploadID *string `json:"upload_id" name:"upload_id" location:"params"`
UploadID *string `json:"upload_id" name:"upload_id" location:"query"`
Count *int `json:"count" name:"count" location:"elements"`
}

View file

@ -85,11 +85,11 @@ func (qss *QingStorSigner) BuildSignature(request *http.Request) (string, error)
signature := strings.TrimSpace(base64.StdEncoding.EncodeToString(h.Sum(nil)))
authorization := "QS " + qss.AccessKeyID + ":" + signature
logger.Debug(fmt.Sprintf(
logger.Debugf(fmt.Sprintf(
"QingStor authorization: [%d] %s",
convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822),
authorization),
)
authorization,
))
return authorization, nil
}
@ -111,7 +111,7 @@ func (qss *QingStorSigner) BuildQuerySignature(request *http.Request, expires in
qss.AccessKeyID, expires, signature,
)
logger.Debug(fmt.Sprintf(
logger.Debugf(fmt.Sprintf(
"QingStor query signature: [%d] %s",
convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822),
query,
@ -141,7 +141,7 @@ func (qss *QingStorSigner) BuildStringToSign(request *http.Request) (string, err
}
stringToSign += canonicalizedResource
logger.Debug(fmt.Sprintf(
logger.Debugf(fmt.Sprintf(
"QingStor string to sign: [%d] %s",
convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822),
stringToSign,
@ -167,7 +167,7 @@ func (qss *QingStorSigner) BuildQueryStringToSign(request *http.Request, expires
}
stringToSign += canonicalizedResource
logger.Debug(fmt.Sprintf(
logger.Debugf(fmt.Sprintf(
"QingStor query string to sign: [%d] %s",
convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822),
stringToSign,
@ -208,7 +208,7 @@ func (qss *QingStorSigner) buildCanonicalizedResource(request *http.Request) (st
parts := []string{}
for _, key := range keys {
values := query[key]
if qss.paramsToSign(key) {
if qss.queryToSign(key) {
if len(values) > 0 {
if values[0] != "" {
value := strings.TrimSpace(strings.Join(values, ""))
@ -231,7 +231,7 @@ func (qss *QingStorSigner) buildCanonicalizedResource(request *http.Request) (st
path = path + "?" + joinedParts
}
logger.Debug(fmt.Sprintf(
logger.Debugf(fmt.Sprintf(
"QingStor canonicalized resource: [%d] %s",
convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822),
path,
@ -240,7 +240,7 @@ func (qss *QingStorSigner) buildCanonicalizedResource(request *http.Request) (st
return path, nil
}
func (qss *QingStorSigner) paramsToSign(key string) bool {
func (qss *QingStorSigner) queryToSign(key string) bool {
keysMap := map[string]bool{
"acl": true,
"cors": true,
@ -252,6 +252,8 @@ func (qss *QingStorSigner) paramsToSign(key string) bool {
"upload_id": true,
"uploads": true,
"image": true,
"lifecycle": true,
"logging": true,
"response-expires": true,
"response-cache-control": true,
"response-content-type": true,

View file

@ -69,7 +69,7 @@ func (b *BaseUnpacker) exposeStatusCode() error {
if value.IsValid() {
switch value.Interface().(type) {
case *int:
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor response status code: [%d] %d",
convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822),
b.httpResponse.StatusCode,
@ -82,7 +82,7 @@ func (b *BaseUnpacker) exposeStatusCode() error {
}
func (b *BaseUnpacker) parseResponseHeaders() error {
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor response headers: [%d] %s",
convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822),
fmt.Sprint(b.httpResponse.Header),
@ -145,7 +145,7 @@ func (b *BaseUnpacker) parseResponseBody() error {
buffer.ReadFrom(b.httpResponse.Body)
b.httpResponse.Body.Close()
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor response body string: [%d] %s",
convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822),
string(buffer.Bytes()),
@ -168,7 +168,7 @@ func (b *BaseUnpacker) parseResponseElements() error {
buffer.ReadFrom(b.httpResponse.Body)
b.httpResponse.Body.Close()
logger.Info(fmt.Sprintf(
logger.Infof(fmt.Sprintf(
"QingStor response body string: [%d] %s",
convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822),
string(buffer.Bytes()),

View file

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/yunify/qingstor-sdk-go/config"
@ -31,6 +32,7 @@ import (
var _ fmt.State
var _ io.Reader
var _ http.Header
var _ strings.Reader
var _ time.Time
var _ config.Config
@ -42,6 +44,7 @@ type Bucket struct {
// Bucket initializes a new bucket.
func (s *Service) Bucket(bucketName string, zone string) (*Bucket, error) {
zone = strings.ToLower(zone)
properties := &Properties{
BucketName: &bucketName,
Zone: &zone,
@ -743,15 +746,15 @@ func (s *Bucket) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (
// ListMultipartUploadsInput presents input for ListMultipartUploads.
type ListMultipartUploadsInput struct {
// Put all keys that share a common prefix into a list
Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"params"`
Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"query"`
// Limit results returned from the first key after key_marker sorted by alphabetical order
KeyMarker *string `json:"key_marker,omitempty" name:"key_marker" location:"params"`
KeyMarker *string `json:"key_marker,omitempty" name:"key_marker" location:"query"`
// Results count limit
Limit *int `json:"limit,omitempty" name:"limit" location:"params"`
Limit *int `json:"limit,omitempty" name:"limit" location:"query"`
// Limits results to keys that begin with the prefix
Prefix *string `json:"prefix,omitempty" name:"prefix" location:"params"`
Prefix *string `json:"prefix,omitempty" name:"prefix" location:"query"`
// Limit results returned from the first uploading segment after upload_id_marker sorted by the time of upload_id
UploadIDMarker *string `json:"upload_id_marker,omitempty" name:"upload_id_marker" location:"params"`
UploadIDMarker *string `json:"upload_id_marker,omitempty" name:"upload_id_marker" location:"query"`
}
// Validate validates the input for ListMultipartUploads.
@ -838,13 +841,13 @@ func (s *Bucket) ListObjectsRequest(input *ListObjectsInput) (*request.Request,
// ListObjectsInput presents input for ListObjects.
type ListObjectsInput struct {
// Put all keys that share a common prefix into a list
Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"params"`
Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"query"`
// Results count limit
Limit *int `json:"limit,omitempty" name:"limit" location:"params"`
Limit *int `json:"limit,omitempty" name:"limit" location:"query"`
// Limit results to keys that start at this marker
Marker *string `json:"marker,omitempty" name:"marker" location:"params"`
Marker *string `json:"marker,omitempty" name:"marker" location:"query"`
// Limits results to keys that begin with the prefix
Prefix *string `json:"prefix,omitempty" name:"prefix" location:"params"`
Prefix *string `json:"prefix,omitempty" name:"prefix" location:"query"`
}
// Validate validates the input for ListObjects.

View file

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/yunify/qingstor-sdk-go/config"
@ -31,6 +32,7 @@ import (
var _ fmt.State
var _ io.Reader
var _ http.Header
var _ strings.Reader
var _ time.Time
var _ config.Config
@ -88,7 +90,7 @@ func (s *Bucket) AbortMultipartUploadRequest(objectKey string, input *AbortMulti
// AbortMultipartUploadInput presents input for AbortMultipartUpload.
type AbortMultipartUploadInput struct {
// Object multipart upload ID
UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required
UploadID *string `json:"upload_id" name:"upload_id" location:"query"` // Required
}
@ -166,7 +168,7 @@ func (s *Bucket) CompleteMultipartUploadRequest(objectKey string, input *Complet
// CompleteMultipartUploadInput presents input for CompleteMultipartUpload.
type CompleteMultipartUploadInput struct {
// Object multipart upload ID
UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required
UploadID *string `json:"upload_id" name:"upload_id" location:"query"` // Required
// MD5sum of the object part
ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"`
@ -207,6 +209,9 @@ type CompleteMultipartUploadOutput struct {
StatusCode *int `location:"statusCode"`
RequestID *string `location:"requestID"`
// Encryption algorithm of the object
XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"`
}
// DeleteObject does Delete the object.
@ -320,17 +325,17 @@ func (s *Bucket) GetObjectRequest(objectKey string, input *GetObjectInput) (*req
// GetObjectInput presents input for GetObject.
type GetObjectInput struct {
// Specified the Cache-Control response header
ResponseCacheControl *string `json:"response-cache-control,omitempty" name:"response-cache-control" location:"params"`
ResponseCacheControl *string `json:"response-cache-control,omitempty" name:"response-cache-control" location:"query"`
// Specified the Content-Disposition response header
ResponseContentDisposition *string `json:"response-content-disposition,omitempty" name:"response-content-disposition" location:"params"`
ResponseContentDisposition *string `json:"response-content-disposition,omitempty" name:"response-content-disposition" location:"query"`
// Specified the Content-Encoding response header
ResponseContentEncoding *string `json:"response-content-encoding,omitempty" name:"response-content-encoding" location:"params"`
ResponseContentEncoding *string `json:"response-content-encoding,omitempty" name:"response-content-encoding" location:"query"`
// Specified the Content-Language response header
ResponseContentLanguage *string `json:"response-content-language,omitempty" name:"response-content-language" location:"params"`
ResponseContentLanguage *string `json:"response-content-language,omitempty" name:"response-content-language" location:"query"`
// Specified the Content-Type response header
ResponseContentType *string `json:"response-content-type,omitempty" name:"response-content-type" location:"params"`
ResponseContentType *string `json:"response-content-type,omitempty" name:"response-content-type" location:"query"`
// Specified the Expires response header
ResponseExpires *string `json:"response-expires,omitempty" name:"response-expires" location:"params"`
ResponseExpires *string `json:"response-expires,omitempty" name:"response-expires" location:"query"`
// Check whether the ETag matches
IfMatch *string `json:"If-Match,omitempty" name:"If-Match" location:"headers"`
@ -522,19 +527,19 @@ func (s *Bucket) ImageProcessRequest(objectKey string, input *ImageProcessInput)
// ImageProcessInput presents input for ImageProcess.
type ImageProcessInput struct {
// Image process action
Action *string `json:"action" name:"action" location:"params"` // Required
Action *string `json:"action" name:"action" location:"query"` // Required
// Specified the Cache-Control response header
ResponseCacheControl *string `json:"response-cache-control,omitempty" name:"response-cache-control" location:"params"`
ResponseCacheControl *string `json:"response-cache-control,omitempty" name:"response-cache-control" location:"query"`
// Specified the Content-Disposition response header
ResponseContentDisposition *string `json:"response-content-disposition,omitempty" name:"response-content-disposition" location:"params"`
ResponseContentDisposition *string `json:"response-content-disposition,omitempty" name:"response-content-disposition" location:"query"`
// Specified the Content-Encoding response header
ResponseContentEncoding *string `json:"response-content-encoding,omitempty" name:"response-content-encoding" location:"params"`
ResponseContentEncoding *string `json:"response-content-encoding,omitempty" name:"response-content-encoding" location:"query"`
// Specified the Content-Language response header
ResponseContentLanguage *string `json:"response-content-language,omitempty" name:"response-content-language" location:"params"`
ResponseContentLanguage *string `json:"response-content-language,omitempty" name:"response-content-language" location:"query"`
// Specified the Content-Type response header
ResponseContentType *string `json:"response-content-type,omitempty" name:"response-content-type" location:"params"`
ResponseContentType *string `json:"response-content-type,omitempty" name:"response-content-type" location:"query"`
// Specified the Expires response header
ResponseExpires *string `json:"response-expires,omitempty" name:"response-expires" location:"params"`
ResponseExpires *string `json:"response-expires,omitempty" name:"response-expires" location:"query"`
// Check whether the object has been modified
IfModifiedSince *time.Time `json:"If-Modified-Since,omitempty" name:"If-Modified-Since" format:"RFC 822" location:"headers"`
@ -706,11 +711,11 @@ func (s *Bucket) ListMultipartRequest(objectKey string, input *ListMultipartInpu
// ListMultipartInput presents input for ListMultipart.
type ListMultipartInput struct {
// Limit results count
Limit *int `json:"limit,omitempty" name:"limit" location:"params"`
Limit *int `json:"limit,omitempty" name:"limit" location:"query"`
// Object multipart upload part number
PartNumberMarker *int `json:"part_number_marker,omitempty" name:"part_number_marker" location:"params"`
PartNumberMarker *int `json:"part_number_marker,omitempty" name:"part_number_marker" location:"query"`
// Object multipart upload ID
UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required
UploadID *string `json:"upload_id" name:"upload_id" location:"query"` // Required
}
@ -944,6 +949,11 @@ type PutObjectOutput struct {
StatusCode *int `location:"statusCode"`
RequestID *string `location:"requestID"`
// MD5sum of the object
ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"`
// Encryption algorithm of the object
XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"`
}
// UploadMultipart does Upload object multipart.
@ -1000,9 +1010,9 @@ func (s *Bucket) UploadMultipartRequest(objectKey string, input *UploadMultipart
// UploadMultipartInput presents input for UploadMultipart.
type UploadMultipartInput struct {
// Object multipart upload part number
PartNumber *int `json:"part_number" name:"part_number" default:"0" location:"params"` // Required
PartNumber *int `json:"part_number" name:"part_number" default:"0" location:"query"` // Required
// Object multipart upload ID
UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required
UploadID *string `json:"upload_id" name:"upload_id" location:"query"` // Required
// Object multipart content length
ContentLength *int64 `json:"Content-Length,omitempty" name:"Content-Length" location:"headers"`
@ -1062,4 +1072,11 @@ type UploadMultipartOutput struct {
StatusCode *int `location:"statusCode"`
RequestID *string `location:"requestID"`
// MD5sum of the object
ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"`
// Range of response data content
XQSContentCopyRange *string `json:"X-QS-Content-Copy-Range,omitempty" name:"X-QS-Content-Copy-Range" location:"headers"`
// Encryption algorithm of the object
XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"`
}

View file

@ -107,12 +107,12 @@
{{$isBucket := eq $service.Name "Bucket"}}
{{$isObject := eq $service.Name "Object"}}
{{$hasParams := gt (len $operation.Request.Params.Properties) 0}}
{{$hasQuery := gt (len $operation.Request.Query.Properties) 0}}
{{$hasHeaders := gt (len $operation.Request.Headers.Properties) 0}}
{{$hasElements := gt (len $operation.Request.Elements.Properties) 0}}
{{$hasStringBody := eq $operation.Request.Body.Type "string"}}
{{$hasBinaryBody := eq $operation.Request.Body.Type "binary"}}
{{$hasInput := or $hasParams $hasHeaders $hasElements $hasStringBody $hasBinaryBody}}
{{$hasInput := or $hasQuery $hasHeaders $hasElements $hasStringBody $hasBinaryBody}}
{{if $operation.Description -}}
{{if eq $belongs "Bucket" -}}
@ -181,10 +181,10 @@
input = &{{$opID}}Input{}
}
{{end}}
{{$uri := $operation.Request.URI}}
{{$uri := replace $uri "{" "<" -1}}
{{$uri := replace $uri "}" ">" -1}}
{{$uri := dashConnected $uri}}
{{$path := $operation.Request.Path}}
{{$path := replace $path "{" "<" -1}}
{{$path := replace $path "}" ">" -1}}
{{$path := dashConnected $path}}
{{- if ne $belongs "Service"}}
properties := *s.Properties
@ -200,21 +200,18 @@
{{- end}}
APIName: "{{$operation.Name}}",
RequestMethod: "{{$operation.Request.Method}}",
RequestURI: "{{$uri}}",
{{if $operation.Response.StatusCodes -}}
StatusCodes: []int{
{{range $statusCodeNumber, $statusCode := $operation.Response.StatusCodes -}}
{{$statusCodeNumber}},
{{- if $statusCode.Description -}}
// {{$statusCode.Description}}
{{- end}}
RequestURI: "{{$path}}",
StatusCodes: []int{
{{range $keyStatus, $valueStatus := $operation.Responses -}}
{{- if $valueStatus.StatusCode -}}
{{$valueStatus.StatusCode.Code}}, // {{$valueStatus.StatusCode.Description}}
{{else}}
200, // OK
{{end -}}
},
{{else}}
StatusCodes: []int{
{{else}}
200, // OK
},
{{end -}}
{{end -}}
},
}
x := &{{$opID}}Output{}
@ -229,9 +226,9 @@
{{if $hasInput}}
// {{$opID}}Input presents input for {{$opID}}.
type {{$opID}}Input struct {
{{- if $operation.Request.Params.Properties | len}}
{{$data := $operation.Request.Params -}}
{{template "RenderProperties" passThrough $data `location:"params"` $operation.Name}}
{{- if $operation.Request.Query.Properties | len}}
{{$data := $operation.Request.Query -}}
{{template "RenderProperties" passThrough $data `location:"query"` $operation.Name}}
{{end}}
{{- if $operation.Request.Headers.Properties | len}}
@ -259,7 +256,7 @@
// Validate validates the input for {{$opID}}.
func (v *{{$opID}}Input) Validate() error {
{{template "ValidateCustomizedType" passThrough $operation.Request.Params $operation.Name}}
{{template "ValidateCustomizedType" passThrough $operation.Request.Query $operation.Name}}
{{template "ValidateCustomizedType" passThrough $operation.Request.Headers $operation.Name}}
{{template "ValidateCustomizedType" passThrough $operation.Request.Elements $operation.Name}}
@ -272,27 +269,28 @@
StatusCode *int `location:"statusCode"`
RequestID *string `location:"requestID"`
{{range $keyStatus, $valueStatus := $operation.Responses -}}
{{if eq $valueStatus.Body.Type "string"}}
{{if $valueStatus.Body.Description -}}
// {{$valueStatus.Body.Description}}
{{- end}}
Body string `location:"body"`
{{else if eq $valueStatus.Body.Type "binary"}}
{{if $valueStatus.Body.Description -}}
// {{$valueStatus.Body.Description}}
{{- end}}
Body io.ReadCloser `location:"body"`
{{end}}
{{if eq $operation.Response.Body.Type "string"}}
{{if $operation.Response.Body.Description -}}
// {{$operation.Response.Body.Description}}
{{- end}}
Body string `location:"body"`
{{else if eq $operation.Response.Body.Type "binary"}}
{{if $operation.Response.Body.Description -}}
// {{$operation.Response.Body.Description}}
{{- end}}
Body io.ReadCloser `location:"body"`
{{end}}
{{if $valueStatus.Elements.Properties | len}}
{{$data := $valueStatus.Elements}}
{{template "RenderProperties" passThrough $data `location:"elements"` $operation.Name}}
{{end}}
{{if $operation.Response.Elements.Properties | len}}
{{$data := $operation.Response.Elements}}
{{template "RenderProperties" passThrough $data `location:"elements"` $operation.Name}}
{{end}}
{{if $operation.Response.Headers.Properties | len}}
{{$data := $operation.Response.Headers}}
{{template "RenderProperties" passThrough $data `location:"headers"` $operation.Name}}
{{if $valueStatus.Headers.Properties | len}}
{{$data := $valueStatus.Headers}}
{{template "RenderProperties" passThrough $data `location:"headers"` $operation.Name}}
{{end}}
{{end}}
}
{{end}}

View file

@ -23,6 +23,7 @@ import (
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/yunify/qingstor-sdk-go/config"
@ -34,6 +35,7 @@ import (
var _ fmt.State
var _ io.Reader
var _ http.Header
var _ strings.Reader
var _ time.Time
var _ config.Config
@ -48,6 +50,11 @@ var _ config.Config
func (s *Service) {{$subService.ID | camelCase}}(
{{- template "SubServiceInitParams" passThrough $subService.Properties true -}}
) (*{{$subService.ID | camelCase}}, error) {
{{- range $_, $property := $subService.Properties.Properties -}}
{{if eq $property.ID "zone"}}
{{$property.ID}} = strings.ToLower({{$property.ID}})
{{end}}
{{- end -}}
properties := &Properties{
{{range $_, $property := $subService.Properties.Properties -}}
{{$property.ID | upperFirst}}: &{{$property.ID}},

View file

@ -30,20 +30,21 @@ import (
// Properties presents the service properties.
type Properties struct {
{{- template "RenderProperties" passThrough $service.Properties "" "" -}}
{{range $_, $p := $objectSubService.Properties.Properties -}}
{{- if $p.Description -}}
// {{$p.Description}}
{{end -}}
{{if $p.Enum -}}
// {{camelCase $p.ID}}'s available values: {{commaConnected $p.Enum}}
{{end -}}
{{$p.ID | camelCase | upperFirst}}{{" " -}}
{{template "PropertyType" passThrough $p false}}{{" " -}}
`{{template "PropertyTagsDashConnected" $p}}`{{" " -}}
{{if $p.IsRequired -}}
// Required
{{- end}}
{{if $objectSubService -}}
{{range $_, $p := $objectSubService.Properties.Properties -}}
{{- if $p.Description -}}
// {{$p.Description}}
{{end -}}
{{if $p.Enum -}}
// {{camelCase $p.ID}}'s available values: {{commaConnected $p.Enum}}
{{end -}}
{{$p.ID | camelCase | upperFirst}}{{" " -}}
{{template "PropertyType" passThrough $p false}}{{" " -}}
`{{template "PropertyTagsDashConnected" $p}}`{{" " -}}
{{if $p.IsRequired -}}
// Required
{{- end}}
{{end}}
{{end}}
}

View file

@ -0,0 +1,10 @@
@upload
Feature: the upload feature
Scenario: create the uploader
When initialize uploader
Then uploader is initialized
Scenario: upload large file
When upload a large file
Then the large file is uploaded

View file

@ -42,10 +42,11 @@ func main() {
ObjectFeatureContext(s)
ObjectMultipartFeatureContext(s)
ImageFeatureContext(s)
UploadFeatureContext(s)
}
options := godog.Options{
Format: "pretty",
Paths: []string{"./features"},
Paths: []string{"./features", "./local_features"},
Tags: "",
}
status := godog.RunWithOptions("*", context, options)
@ -108,6 +109,8 @@ type testConfig struct {
RetryWaitTime int `json:"retry_wait_time" yaml:"retry_wait_time"`
MaxRetries int `json:"max_retries" yaml:"max_retries"`
Concurrency int `json:"concurrency"`
}
func loadTestConfig() {

View file

@ -67,9 +67,7 @@ func ObjectFeatureContext(s *godog.Suite) {
}
// --------------------------------------------------------------------------
const concurrency = 16
var putObjectOutputs [concurrency]*qs.PutObjectOutput
var putObjectOutputs []*qs.PutObjectOutput
func putObjectWithKey(objectKey string) error {
_, err = exec.Command("dd", "if=/dev/zero", "of=/tmp/sdk_bin", "bs=1024", "count=1").Output()
@ -78,11 +76,12 @@ func putObjectWithKey(objectKey string) error {
}
defer os.Remove("/tmp/sdk_bin")
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
putObjectOutputs = make([]*qs.PutObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -126,7 +125,7 @@ func putObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -146,14 +145,15 @@ func putObjectStatusCodeIs(statusCode int) error {
}
// --------------------------------------------------------------------------
var copyObjectOutputs [concurrency]*qs.PutObjectOutput
var copyObjectOutputs []*qs.PutObjectOutput
func copyObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
copyObjectOutputs = make([]*qs.PutObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -178,7 +178,7 @@ func copyObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -198,14 +198,15 @@ func copyObjectStatusCodeIs(statusCode int) error {
}
// --------------------------------------------------------------------------
var moveObjectOutputs [concurrency]*qs.PutObjectOutput
var moveObjectOutputs []*qs.PutObjectOutput
func moveObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
moveObjectOutputs = make([]*qs.PutObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -230,7 +231,7 @@ func moveObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -251,14 +252,15 @@ func moveObjectStatusCodeIs(statusCode int) error {
// --------------------------------------------------------------------------
var getObjectOutputs [concurrency]*qs.GetObjectOutput
var getObjectOutputs []*qs.GetObjectOutput
func getObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
getObjectOutputs = make([]*qs.GetObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -279,7 +281,7 @@ func getObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -313,14 +315,15 @@ func getObjectContentLengthIs(length int) error {
// --------------------------------------------------------------------------
var getObjectWithContentTypeRequests [concurrency]*request.Request
var getObjectWithContentTypeRequests []*request.Request
func getObjectWithContentType(objectKey, contentType string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
getObjectWithContentTypeRequests = make([]*request.Request, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -354,7 +357,7 @@ func getObjectWithContentType(objectKey, contentType string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -375,14 +378,15 @@ func getObjectContentTypeIs(contentType string) error {
// --------------------------------------------------------------------------
var getObjectWithQuerySignatureURLs [concurrency]string
var getObjectWithQuerySignatureURLs []string
func getObjectWithQuerySignature(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
getObjectWithQuerySignatureURLs = make([]string, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -414,7 +418,7 @@ func getObjectWithQuerySignature(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -443,14 +447,15 @@ func getObjectWithQuerySignatureContentLengthIs(length int) error {
// --------------------------------------------------------------------------
var headObjectOutputs [concurrency]*qs.HeadObjectOutput
var headObjectOutputs []*qs.HeadObjectOutput
func headObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
headObjectOutputs = make([]*qs.HeadObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -471,7 +476,7 @@ func headObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -492,14 +497,15 @@ func headObjectStatusCodeIs(statusCode int) error {
// --------------------------------------------------------------------------
var optionsObjectOutputs [concurrency]*qs.OptionsObjectOutput
var optionsObjectOutputs []*qs.OptionsObjectOutput
func optionsObjectWithMethodAndOrigin(objectKey, method, origin string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
optionsObjectOutputs = make([]*qs.OptionsObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -524,7 +530,7 @@ func optionsObjectWithMethodAndOrigin(objectKey, method, origin string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -545,15 +551,16 @@ func optionsObjectStatusCodeIs(statusCode int) error {
// --------------------------------------------------------------------------
var deleteObjectOutputs [concurrency]*qs.DeleteObjectOutput
var deleteTheMoveObjectOutputs [concurrency]*qs.DeleteObjectOutput
var deleteObjectOutputs []*qs.DeleteObjectOutput
var deleteTheMoveObjectOutputs []*qs.DeleteObjectOutput
func deleteObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
deleteObjectOutputs = make([]*qs.DeleteObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -574,7 +581,7 @@ func deleteObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err
@ -594,11 +601,12 @@ func deleteObjectStatusCodeIs(statusCode int) error {
}
func deleteTheMoveObjectWithKey(objectKey string) error {
errChan := make(chan error, concurrency)
errChan := make(chan error, tc.Concurrency)
deleteTheMoveObjectOutputs = make([]*qs.DeleteObjectOutput, tc.Concurrency)
wg := sync.WaitGroup{}
wg.Add(concurrency)
for i := 0; i < concurrency; i++ {
wg.Add(tc.Concurrency)
for i := 0; i < tc.Concurrency; i++ {
go func(index int, errChan chan<- error) {
wg.Done()
@ -619,7 +627,7 @@ func deleteTheMoveObjectWithKey(objectKey string) error {
}
wg.Wait()
for i := 0; i < concurrency; i++ {
for i := 0; i < tc.Concurrency; i++ {
err = <-errChan
if err != nil {
return err

View file

@ -5,3 +5,5 @@ bucket_name: access-key-id
retry_wait_time: 3 # seconds
max_retries: 60
concurrency: 1

View file

@ -0,0 +1,67 @@
package main
import (
"os"
"os/exec"
"errors"
"github.com/DATA-DOG/godog"
"github.com/yunify/qingstor-sdk-go/client/upload"
)
var uploader *upload.Uploader
// UploadFeatureContext provides feature context for upload.
func UploadFeatureContext(s *godog.Suite) {
s.Step("initialize uploader$", initializeUploader)
s.Step("uploader is initialized$", uploaderIsInitialized)
s.Step("upload a large file$", uploadLargeFile)
s.Step("the large file is uploaded$", largeFileIsUploaded)
}
var fd *os.File
func initializeUploader() error {
uploadSetup()
PartSize := 4 * 1024 * 1024
fd, err = os.Open("test_file")
if err != nil {
return err
}
uploader = upload.Init(bucket, PartSize)
return nil
}
func uploaderIsInitialized() error {
if uploader == nil {
return errors.New("uploader not initialized")
}
return nil
}
var objectKey string
func uploadLargeFile() error {
objectKey = "test_multipart_upload"
err := uploader.Upload(fd, objectKey)
if err != nil {
return err
}
return nil
}
func largeFileIsUploaded() error {
defer uploadTearDown()
return nil
}
func uploadSetup() {
exec.Command("dd", "if=/dev/zero", "of=test_file", "bs=1024", "count=20480").Output()
}
func uploadTearDown() {
exec.Command("rm", "", "test_file").Output()
}

View file

@ -20,4 +20,4 @@
package sdk
// Version number.
const Version = "2.2.6"
const Version = "2.2.8"