From 466dd22b4480df0473323d9e56182995a96c49e4 Mon Sep 17 00:00:00 2001 From: wuyu Date: Mon, 26 Jun 2017 05:45:22 +0800 Subject: [PATCH] vendor: add qingstor-sdk-go for QingStor --- Gopkg.lock | 20 +- Gopkg.toml | 4 + vendor/github.com/Sirupsen/logrus/.gitignore | 1 + vendor/github.com/Sirupsen/logrus/.travis.yml | 13 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 100 ++ vendor/github.com/Sirupsen/logrus/LICENSE | 21 + vendor/github.com/Sirupsen/logrus/README.md | 501 +++++++ vendor/github.com/Sirupsen/logrus/alt_exit.go | 64 + .../Sirupsen/logrus/alt_exit_test.go | 74 + vendor/github.com/Sirupsen/logrus/doc.go | 26 + vendor/github.com/Sirupsen/logrus/entry.go | 275 ++++ .../github.com/Sirupsen/logrus/entry_test.go | 77 ++ .../Sirupsen/logrus/examples/basic/basic.go | 59 + .../Sirupsen/logrus/examples/hook/hook.go | 30 + vendor/github.com/Sirupsen/logrus/exported.go | 193 +++ .../github.com/Sirupsen/logrus/formatter.go | 45 + .../Sirupsen/logrus/formatter_bench_test.go | 101 ++ .../github.com/Sirupsen/logrus/hook_test.go | 122 ++ vendor/github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/hooks/syslog/README.md | 39 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 54 + .../logrus/hooks/syslog/syslog_test.go | 26 + .../Sirupsen/logrus/hooks/test/test.go | 95 ++ .../Sirupsen/logrus/hooks/test/test_test.go | 39 + .../Sirupsen/logrus/json_formatter.go | 74 + .../Sirupsen/logrus/json_formatter_test.go | 199 +++ vendor/github.com/Sirupsen/logrus/logger.go | 317 +++++ .../Sirupsen/logrus/logger_bench_test.go | 61 + vendor/github.com/Sirupsen/logrus/logrus.go | 143 ++ .../github.com/Sirupsen/logrus/logrus_test.go | 386 ++++++ .../Sirupsen/logrus/terminal_appengine.go | 10 + .../Sirupsen/logrus/terminal_bsd.go | 10 + .../Sirupsen/logrus/terminal_linux.go | 14 + .../Sirupsen/logrus/terminal_notwindows.go | 28 + .../Sirupsen/logrus/terminal_solaris.go | 21 + .../Sirupsen/logrus/terminal_windows.go | 82 ++ .../Sirupsen/logrus/text_formatter.go | 189 +++ .../Sirupsen/logrus/text_formatter_test.go | 87 ++ vendor/github.com/Sirupsen/logrus/writer.go | 62 + .../github.com/pengsrc/go-shared/.gitignore | 28 + .../github.com/pengsrc/go-shared/.travis.yml | 35 + vendor/github.com/pengsrc/go-shared/LICENSE | 202 +++ vendor/github.com/pengsrc/go-shared/Makefile | 61 + vendor/github.com/pengsrc/go-shared/README.md | 19 + .../pengsrc/go-shared/check/error.go | 21 + .../pengsrc/go-shared/check/error_test.go | 12 + .../pengsrc/go-shared/check/host.go | 11 + .../pengsrc/go-shared/check/host_test.go | 16 + .../pengsrc/go-shared/check/slice.go | 41 + .../pengsrc/go-shared/check/slice_test.go | 27 + .../pengsrc/go-shared/convert/time.go | 56 + .../pengsrc/go-shared/convert/time_test.go | 78 ++ .../pengsrc/go-shared/convert/types.go | 477 +++++++ .../pengsrc/go-shared/convert/types_test.go | 635 +++++++++ .../github.com/pengsrc/go-shared/glide.lock | 28 + .../github.com/pengsrc/go-shared/glide.yaml | 23 + .../github.com/pengsrc/go-shared/json/json.go | 50 + .../pengsrc/go-shared/json/json_test.go | 79 ++ .../pengsrc/go-shared/logger/logger.go | 358 +++++ .../pengsrc/go-shared/logger/logger_test.go | 218 +++ .../pengsrc/go-shared/pid/pidfile.go | 49 + .../pengsrc/go-shared/pid/pidfile_darwin.go | 14 + .../pengsrc/go-shared/pid/pidfile_test.go | 38 + .../pengsrc/go-shared/pid/pidfile_unix.go | 16 + .../pengsrc/go-shared/pid/pidfile_windows.go | 23 + .../pengsrc/go-shared/reopen/reopen.go | 176 +++ .../pengsrc/go-shared/reopen/reopen_test.go | 124 ++ .../github.com/pengsrc/go-shared/rest/rest.go | 151 ++ 
.../pengsrc/go-shared/rest/rest_test.go | 138 ++ .../github.com/pengsrc/go-shared/yaml/yaml.go | 32 + .../pengsrc/go-shared/yaml/yaml_test.go | 72 + .../yunify/qingstor-sdk-go/.gitignore | 30 + .../yunify/qingstor-sdk-go/.gitmodules | 6 + .../yunify/qingstor-sdk-go/.travis.yml | 51 + .../github.com/yunify/qingstor-sdk-go/AUTHORS | 4 + .../yunify/qingstor-sdk-go/CHANGELOG.md | 107 ++ .../github.com/yunify/qingstor-sdk-go/LICENSE | 201 +++ .../yunify/qingstor-sdk-go/MAINTAINERS | 1 + .../yunify/qingstor-sdk-go/Makefile | 207 +++ .../yunify/qingstor-sdk-go/README.md | 84 ++ .../yunify/qingstor-sdk-go/config/config.go | 172 +++ .../qingstor-sdk-go/config/config_test.go | 108 ++ .../yunify/qingstor-sdk-go/config/contract.go | 74 + .../qingstor-sdk-go/docs/configuration.md | 69 + .../qingstor-sdk-go/docs/installation.md | 39 + .../docs/qingstor_service_usage.md | 220 +++ .../yunify/qingstor-sdk-go/glide.lock | 33 + .../yunify/qingstor-sdk-go/glide.yaml | 4 + .../yunify/qingstor-sdk-go/logger/logger.go | 119 ++ .../qingstor-sdk-go/request/builder/base.go | 303 ++++ .../request/builder/base_test.go | 125 ++ .../request/builder/qingstor.go | 167 +++ .../request/builder/qingstor_test.go | 83 ++ .../qingstor-sdk-go/request/data/input.go | 22 + .../qingstor-sdk-go/request/data/operation.go | 35 + .../request/data/validation.go | 22 + .../request/errors/parameters.go | 53 + .../request/errors/qingstor.go | 36 + .../yunify/qingstor-sdk-go/request/request.go | 232 ++++ .../qingstor-sdk-go/request/request_test.go | 136 ++ .../request/signer/qingstor.go | 259 ++++ .../request/signer/qingstor_test.go | 85 ++ .../qingstor-sdk-go/request/unpacker/base.go | 201 +++ .../request/unpacker/base_test.go | 85 ++ .../request/unpacker/qingstor.go | 72 + .../request/unpacker/qingstor_test.go | 124 ++ .../yunify/qingstor-sdk-go/service/bucket.go | 1218 +++++++++++++++++ .../qingstor-sdk-go/service/convert_types.go | 266 ++++ .../service/convert_types_test.go | 311 +++++ .../yunify/qingstor-sdk-go/service/object.go | 926 +++++++++++++ .../qingstor-sdk-go/service/qingstor.go | 104 ++ .../yunify/qingstor-sdk-go/service/types.go | 463 +++++++ .../qingstor-sdk-go/template/manifest.json | 8 + .../qingstor-sdk-go/template/service.tmpl | 40 + .../qingstor-sdk-go/template/shared.tmpl | 390 ++++++ .../qingstor-sdk-go/template/sub_service.tmpl | 61 + .../qingstor-sdk-go/template/types.tmpl | 62 + .../yunify/qingstor-sdk-go/test/.gitignore | 3 + .../yunify/qingstor-sdk-go/test/bucket.go | 269 ++++ .../yunify/qingstor-sdk-go/test/bucket_acl.go | 79 ++ .../qingstor-sdk-go/test/bucket_cors.go | 95 ++ .../test/bucket_external_mirror.go | 88 ++ .../qingstor-sdk-go/test/bucket_policy.go | 105 ++ .../qingstor-sdk-go/test/config.yaml.example | 15 + .../yunify/qingstor-sdk-go/test/glide.lock | 9 + .../yunify/qingstor-sdk-go/test/glide.yaml | 4 + .../yunify/qingstor-sdk-go/test/main.go | 138 ++ .../yunify/qingstor-sdk-go/test/object.go | 263 ++++ .../qingstor-sdk-go/test/object_multipart.go | 238 ++++ .../yunify/qingstor-sdk-go/test/service.go | 60 + .../test/test_config.yaml.example | 7 + .../yunify/qingstor-sdk-go/test/utils.go | 54 + .../yunify/qingstor-sdk-go/utils/escape.go | 23 + .../qingstor-sdk-go/utils/escape_test.go | 30 + .../yunify/qingstor-sdk-go/version.go | 23 + .../yunify/qingstor-sdk-go/version_test.go | 28 + 136 files changed, 15952 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml create mode 100644 
vendor/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 vendor/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hook_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/test/test.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger_bench_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_appengine.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/pengsrc/go-shared/.gitignore create mode 100644 vendor/github.com/pengsrc/go-shared/.travis.yml create mode 100644 vendor/github.com/pengsrc/go-shared/LICENSE create mode 100644 vendor/github.com/pengsrc/go-shared/Makefile create mode 100644 vendor/github.com/pengsrc/go-shared/README.md create mode 100644 vendor/github.com/pengsrc/go-shared/check/error.go create mode 100644 vendor/github.com/pengsrc/go-shared/check/error_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/check/host.go create mode 100644 vendor/github.com/pengsrc/go-shared/check/host_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/check/slice.go create mode 100644 vendor/github.com/pengsrc/go-shared/check/slice_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/convert/time.go create mode 100644 vendor/github.com/pengsrc/go-shared/convert/time_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/convert/types.go create mode 100644 vendor/github.com/pengsrc/go-shared/convert/types_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/glide.lock create 
mode 100644 vendor/github.com/pengsrc/go-shared/glide.yaml create mode 100644 vendor/github.com/pengsrc/go-shared/json/json.go create mode 100644 vendor/github.com/pengsrc/go-shared/json/json_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/logger/logger.go create mode 100644 vendor/github.com/pengsrc/go-shared/logger/logger_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/pid/pidfile.go create mode 100644 vendor/github.com/pengsrc/go-shared/pid/pidfile_darwin.go create mode 100644 vendor/github.com/pengsrc/go-shared/pid/pidfile_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/pid/pidfile_unix.go create mode 100644 vendor/github.com/pengsrc/go-shared/pid/pidfile_windows.go create mode 100644 vendor/github.com/pengsrc/go-shared/reopen/reopen.go create mode 100644 vendor/github.com/pengsrc/go-shared/reopen/reopen_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/rest/rest.go create mode 100644 vendor/github.com/pengsrc/go-shared/rest/rest_test.go create mode 100644 vendor/github.com/pengsrc/go-shared/yaml/yaml.go create mode 100644 vendor/github.com/pengsrc/go-shared/yaml/yaml_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/.gitignore create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/.gitmodules create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/.travis.yml create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/AUTHORS create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/CHANGELOG.md create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/LICENSE create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/MAINTAINERS create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/Makefile create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/README.md create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/config/config.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/config/config_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/config/contract.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/docs/configuration.md create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/docs/installation.md create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/docs/qingstor_service_usage.md create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/glide.lock create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/glide.yaml create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/logger/logger.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/builder/base.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/builder/base_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/data/input.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/data/operation.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/data/validation.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/errors/parameters.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/errors/qingstor.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/request.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/request_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor.go create mode 100644 
vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/bucket.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/convert_types.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/convert_types_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/object.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/qingstor.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/service/types.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/template/manifest.json create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/template/service.tmpl create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/template/shared.tmpl create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/template/sub_service.tmpl create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/template/types.tmpl create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/.gitignore create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/bucket.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/bucket_acl.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/bucket_cors.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/bucket_external_mirror.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/bucket_policy.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/config.yaml.example create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/glide.lock create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/glide.yaml create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/main.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/object.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/object_multipart.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/service.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/test_config.yaml.example create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/test/utils.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/utils/escape.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/utils/escape_test.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/version.go create mode 100644 vendor/github.com/yunify/qingstor-sdk-go/version_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 03ffe224e..76f08ae6b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -13,6 +13,12 @@ revision = "a5913b3f7deecba45e98ff33cefbac4fd204ddd7" version = "v0.10.0" +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "202f25545ea4cf9b191ff7f846df5d87c9382c2b" + version = "v1.0.0" + [[projects]] branch = "master" name = "github.com/Unknwon/goconfig" @@ -127,6 +133,12 @@ packages = ["."] revision = "4ed959e0540971545eddb8c75514973d670cf739" +[[projects]] + name = "github.com/pengsrc/go-shared" + packages = ["check","convert","json","yaml"] + revision = "454950d6a0782c34427d4f29b46c6bf447256f20" + version = "v0.0.8" + [[projects]] branch = "master" name = 
"github.com/pkg/errors" @@ -193,6 +205,12 @@ packages = ["."] revision = "ba9c9e33906f58169366275e3450db66139a31a9" +[[projects]] + name = "github.com/yunify/qingstor-sdk-go" + packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"] + revision = "26f2cc6f249f4c2a08ed89f1d7d566a463c1dfc2" + version = "v2.2.5" + [[projects]] branch = "master" name = "golang.org/x/crypto" @@ -250,6 +268,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "b04805ec8dc2bf704d0ad0c73e2ce9f13f3770b946a70bda8fd602886ff124a4" + inputs-digest = "904bc0ea1f770b0473b24560dc0d24c5c647971e959e58538799c5cad1eaa97e" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 1141509ae..60ba46208 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -132,3 +132,7 @@ [[constraint]] branch = "master" name = "github.com/ncw/dropbox-sdk-go-unofficial" + +[[constraint]] + branch = "master" + name = "github.com/yunify/qingstor-sdk-go" diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 000000000..66be63a00 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..924f3c46b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - tip +env: + - GOMAXPROCS=4 GORACE=halt_on_error=1 +install: + - go get github.com/stretchr/testify/assert +script: + - go test -race -v . + - cd hooks/null && go test -race -v . diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..63d415e12 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,100 @@ +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* 
hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..cbe8b6962 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,501 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +**Seeing weird case-sensitive problems?** Unfortunately, the author failed to +realize the consequences of renaming to lower-case. Due to the Go package +environment, this caused issues. Regretfully, there's no turning back now. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +I am terribly sorry for this inconvenience. 
Logrus strives hard for backwards +compatibility, and the author failed to realize the cascading consequences of +such a name-change. To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. 
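+	// Entries below Warn (Debug and Info) will be discarded.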
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | +| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | +| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | + +#### Level logging + +Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). 
Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/null" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := null.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8af90637a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
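+//
+// Note: the handlers registered in this file run before logrus terminates the
+// process: logrus.Fatal and Entry.Fatal call Exit(1) after logging, which in
+// turn invokes every registered handler before calling os.Exit.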
+ +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go new file mode 100644 index 000000000..d18296348 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit_test.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "io/ioutil" + "os/exec" + "testing" + "time" +) + +func TestRegister(t *testing.T) { + current := len(handlers) + RegisterExitHandler(func() {}) + if len(handlers) != current+1 { + t.Fatalf("can't add handler") + } +} + +func TestHandler(t *testing.T) { + gofile := "/tmp/testprog.go" + if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { + t.Fatalf("can't create go file") + } + + outfile := "/tmp/testprog.out" + arg := time.Now().UTC().String() + err := exec.Command("go", "run", gofile, outfile, arg).Run() + if err == nil { + t.Fatalf("completed normally, should have failed") + } + + data, err := ioutil.ReadFile(outfile) + if err != nil { + t.Fatalf("can't read output file %s", outfile) + } + + if string(data) != arg { + t.Fatalf("bad data") + } +} + +var testprog = []byte(` +// Test program for atexit, gets output file and data as arguments and writes +// data to output file in atexit handler. +package main + +import ( + "github.com/sirupsen/logrus" + "flag" + "fmt" + "io/ioutil" +) + +var outfile = "" +var data = "" + +func handler() { + ioutil.WriteFile(outfile, []byte(data), 0666) +} + +func badHandler() { + n := 0 + fmt.Println(1/n) +} + +func main() { + flag.Parse() + outfile = flag.Arg(0) + data = flag.Arg(1) + + logrus.RegisterExitHandler(handler) + logrus.RegisterExitHandler(badHandler) + logrus.Fatal("Bye bye") +} +`) diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 000000000..da67aba06 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..320e5d5b8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,275 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
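+// The receiver is not modified: its Data map is copied into a new Entry that
+// carries both the existing and the newly supplied fields.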
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.level() >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.level() >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.level() >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.level() >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.level() >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.level() >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
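+	// Sprintln always appends a trailing newline; drop it before returning.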
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 000000000..99c3b41d5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,77 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go new file mode 100644 index 000000000..3e112b4ee --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,59 @@ +package main + +import ( + "github.com/sirupsen/logrus" + // "os" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() + if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 000000000..c8470c387 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + 
"github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..1aeaa90ba --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.setLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.level() +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) 
+} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..b5fbe934d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. 
Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000..d9481589f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,101 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + logger := New() + + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + Logger: logger, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 000000000..13f34cb6f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + 
log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
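+//
+// A hedged sketch of how a custom hook ends up being fired here (the
+// countingHook name is illustrative, not part of this package): Add
+// registers the hook for each level it reports, and entry.log calls Fire
+// with the entry's level.
+//
+//	type countingHook struct{ fired int }
+//
+//	func (h *countingHook) Levels() []Level { return []Level{ErrorLevel} }
+//
+//	func (h *countingHook) Fire(e *Entry) error {
+//		h.fired++ // runs once per Error-level entry
+//		return nil
+//	}
+//
+//	log := New()
+//	log.Hooks.Add(&countingHook{})
+//	log.Error("boom") // fires the hook exactly once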
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 000000000..92b391c17 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. + +```go +import ( + "log/syslog" + "github.com/sirupsen/logrus" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 000000000..204f0016d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,54 @@ +// +build !windows,!nacl,!plan9 + +package logrus_syslog + +import ( + "fmt" + "github.com/sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. 
This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 000000000..8d7fbe452 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go new file mode 100644 index 000000000..62c4845df --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test.go @@ -0,0 +1,95 @@ +// The Test package is used for testing logrus. It is here for backwards +// compatibility from when logrus' organization was upper-case. Please use +// lower-case logrus and the `null` package instead of this one. +package test + +import ( + "io/ioutil" + "sync" + + "github.com/sirupsen/logrus" +) + +// Hook is a hook designed for dealing with logs in test scenarios. +type Hook struct { + // Entries is an array of all entries that have been received by this hook. + // For safe access, use the AllEntries() method, rather than reading this + // value directly. + Entries []*logrus.Entry + mu sync.RWMutex +} + +// NewGlobal installs a test hook for the global logger. +func NewGlobal() *Hook { + + hook := new(Hook) + logrus.AddHook(hook) + + return hook + +} + +// NewLocal installs a test hook for a given local logger. +func NewLocal(logger *logrus.Logger) *Hook { + + hook := new(Hook) + logger.Hooks.Add(hook) + + return hook + +} + +// NewNullLogger creates a discarding logger and installs the test hook. 
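+//
+// A minimal usage sketch in a test, mirroring this package's own tests:
+//
+//	logger, hook := test.NewNullLogger()
+//	logger.Error("boom")
+//	// hook.LastEntry().Message == "boom"
+//	// hook.LastEntry().Level == logrus.ErrorLevel
+//	hook.Reset() // drop recorded entries between cases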
+func NewNullLogger() (*logrus.Logger, *Hook) { + + logger := logrus.New() + logger.Out = ioutil.Discard + + return logger, NewLocal(logger) + +} + +func (t *Hook) Fire(e *logrus.Entry) error { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = append(t.Entries, e) + return nil +} + +func (t *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// LastEntry returns the last entry that was logged or nil. +func (t *Hook) LastEntry() *logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + i := len(t.Entries) - 1 + if i < 0 { + return nil + } + // Make a copy, for safety + e := *t.Entries[i] + return &e +} + +// AllEntries returns all entries that were logged. +func (t *Hook) AllEntries() []*logrus.Entry { + t.mu.RLock() + defer t.mu.RUnlock() + // Make a copy so the returned value won't race with future log requests + entries := make([]*logrus.Entry, len(t.Entries)) + for i, entry := range t.Entries { + // Make a copy, for safety + e := *entry + entries[i] = &e + } + return entries +} + +// Reset removes all Entries from this test hook. +func (t *Hook) Reset() { + t.mu.Lock() + defer t.mu.Unlock() + t.Entries = make([]*logrus.Entry, 0) +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go new file mode 100644 index 000000000..3f55cfe31 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go @@ -0,0 +1,39 @@ +package test + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestAllHooks(t *testing.T) { + + assert := assert.New(t) + + logger, hook := NewNullLogger() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + logger.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + + logger.Warn("Hello warning") + assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) + assert.Equal("Hello warning", hook.LastEntry().Message) + assert.Equal(2, len(hook.Entries)) + + hook.Reset() + assert.Nil(hook.LastEntry()) + assert.Equal(0, len(hook.Entries)) + + hook = NewGlobal() + + logrus.Error("Hello error") + assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal("Hello error", hook.LastEntry().Message) + assert.Equal(1, len(hook.Entries)) + +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..e787ea175 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000..51093a79b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,199 @@ +package logrus + +import ( + "encoding/json" + "errors" + "strings" + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" 
{ + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} + +func TestJSONMessageKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyMsg: "message", + }, + } + + b, err := formatter.Format(&Entry{Message: "oh hai"}) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) { + t.Fatal("Expected JSON to format message key") + } +} + +func TestJSONLevelKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyLevel: "somelevel", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "somelevel") { + t.Fatal("Expected JSON to format level key") + } +} + +func TestJSONTimeKey(t *testing.T) { + formatter := &JSONFormatter{ + FieldMap: FieldMap{ + FieldKeyTime: "timeywimey", + }, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, "timeywimey") { + t.Fatal("Expected JSON to format time key") + } +} + +func TestJSONDisableTimestamp(t *testing.T) { + formatter := &JSONFormatter{ + DisableTimestamp: true, + } + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if strings.Contains(s, FieldKeyTime) { + t.Error("Did not prevent timestamp", s) + } +} + +func TestJSONEnableTimestamp(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + s := string(b) + if !strings.Contains(s, FieldKeyTime) { + t.Error("Timestamp not present", s) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..370fff5d1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,317 @@ +package logrus + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +type 
Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.level() >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.level() >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.level() >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.level() >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.level() >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.level() >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) setLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} diff --git a/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go new file mode 100644 index 000000000..dd23a3535 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger_bench_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "os" + "testing" +) + +// smallFields is a small size data set for benchmarking +var loggerFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +func BenchmarkDummyLogger(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkDummyLoggerNoLock(b *testing.B) { + nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + b.Fatalf("%v", err) + } + defer nullf.Close() + doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) +} + +func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} + +func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { + logger := Logger{ + Out: out, + Level: InfoLevel, + Formatter: formatter, + } + logger.SetNoLock() + entry := logger.WithFields(fields) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + entry.Info("aaa") + } + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..dd3899974 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
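+//
+// An illustrative round trip with ParseLevel (defined below):
+//
+//	lvl, err := ParseLevel("warning")
+//	// err == nil, lvl == WarnLevel, lvl.String() == "warning"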
+func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
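+//
+// A hedged sketch of a constructor built on it (NewClient is hypothetical,
+// not part of this package):
+//
+//	func NewClient(l StdLogger) { l.Println("client ready") }
+//
+//	NewClient(log.New(os.Stderr, "", log.LstdFlags)) // stdlib *log.Logger
+//	NewClient(New())                                 // logrus *Logger
+//	NewClient(New().WithField("svc", "api"))         // logrus *Entry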
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000..78cbc2825 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,386 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, 
fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") 
+ + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("PANIC") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("FATAL") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("ERROR") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARN") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("WARNING") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("INFO") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("DEBUG") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + wg.Wait() +} + +// Compile test +func TestLogrusInterface(t *testing.T) { + var buffer bytes.Buffer + fn := func(l FieldLogger) { + b := l.WithField("key", "value") + b.Debug("Test") + } + // test logger + logger := New() + logger.Out = &buffer + fn(logger) + + // test Entry + e := logger.WithField("another", "value") + fn(e) +} + +// Implements io.Writer using channels for synchronization, so we can wait on +// the Entry.Writer goroutine to write in a non-racey way. This does assume that +// there is a single call to Logger.Out for each message. 
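+//
+// A sketch of the synchronization pattern it enables:
+//
+//	cw := channelWriter(make(chan []byte, 1))
+//	log := New()
+//	log.Out = cw
+//	log.Warn("hello")
+//	<-cw // ready once the entry has been written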
+type channelWriter chan []byte + +func (cw channelWriter) Write(p []byte) (int, error) { + cw <- p + return len(p), nil +} + +func TestEntryWriter(t *testing.T) { + cw := channelWriter(make(chan []byte, 1)) + log := New() + log.Out = cw + log.Formatter = new(JSONFormatter) + log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n")) + + bs := <-cw + var fields Fields + err := json.Unmarshal(bs, &fields) + assert.Nil(t, err) + assert.Equal(t, fields["foo"], "bar") + assert.Equal(t, fields["level"], "warning") +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go new file mode 100644 index 000000000..e011a8694 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,10 @@ +// +build appengine + +package logrus + +import "io" + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + return true +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..5f6be4d3c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..308160ca8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..190297abf --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,28 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + var termios Termios + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 000000000..3c86b1abe --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
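+// Only *os.File writers can be inspected; any other io.Writer reports false.
+// A rough sketch of the kind of check TextFormatter performs internally:
+//
+//	if IsTerminal(os.Stderr) {
+//		// a TTY is attached, so colored output is reasonable
+//	}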
+func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..7a336307e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,82 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "bytes" + "errors" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") +) + +const ( + enableProcessedOutput = 0x0001 + enableWrapAtEolOutput = 0x0002 + enableVirtualTerminalProcessing = 0x0004 +) + +func getVersion() (float64, error) { + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + cmd := exec.Command("cmd", "ver") + cmd.Stdout = stdout + cmd.Stderr = stderr + err := cmd.Run() + if err != nil { + return -1, err + } + + // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]" + version := strings.Replace(stdout.String(), "\n", "", -1) + version = strings.Replace(version, "\r\n", "", -1) + + x1 := strings.Index(version, "[Version") + + if x1 == -1 || strings.Index(version, "]") == -1 { + return -1, errors.New("Can't determine Windows version") + } + + return strconv.ParseFloat(version[x1+9:x1+13], 64) +} + +func init() { + ver, err := getVersion() + if err != nil { + return + } + + // Activate Virtual Processing for Windows CMD + // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + if ver >= 10 { + handle := syscall.Handle(os.Stderr.Fd()) + procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing) + } +} + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..ba8885406 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,189 @@ +package logrus + +import ( + "bytes" + "fmt" + "sort" + "strings" + "sync" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time +) + +func init() { + baseTimestamp = time.Now() +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. 
+ FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // QuoteCharacter can be set to the override the default quoting character " + // with something else. For example: ', or `. + QuoteCharacter string + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !f.needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) + 
} + case error: + errmsg := value.Error() + if !f.needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000..9793b5f37 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,87 @@ +package logrus + +import ( + "bytes" + "errors" + "strings" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte(tf.QuoteCharacter)) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "") + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) + + // Test for custom quote character. + tf.QuoteCharacter = "`" + checkQuoting(false, "") + checkQuoting(false, "abcd") + checkQuoting(true, "/foobar") + checkQuoting(true, errors.New("invalid argument")) + + // Test for multi-character quotes. + tf.QuoteCharacter = "§~±" + checkQuoting(false, "abcd") + checkQuoting(true, errors.New("invalid argument")) + + // Test for quoting empty fields. + tf.QuoteEmptyFields = true + checkQuoting(true, "") + checkQuoting(false, "abcd") + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5+len(customFormatter.QuoteCharacter) : timeEnd-1-len(customFormatter.QuoteCharacter)] + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +func TestDisableTimestampWithColoredOutput(t *testing.T) { + tf := &TextFormatter{DisableTimestamp: true, ForceColors: true} + + b, _ := tf.Format(WithField("test", "test")) + if strings.Contains(string(b), "[0000]") { + t.Error("timestamp not expected when DisableTimestamp is true") + } +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. 
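Not part of the upstream patch, but as context for reviewers: a minimal, hypothetical sketch of how the vendored logrus pieces added above and in writer.go just below (the TextFormatter options, WithField, and WriterLevel) fit together. The import path follows the vendored location; the struct field values, field names, and log messages are illustrative only and are not taken from this change.

package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Configure a logger with the vendored TextFormatter; field sorting
	// stays enabled unless DisableSorting is set.
	log := logrus.New()
	log.Out = os.Stderr
	log.Formatter = &logrus.TextFormatter{
		FullTimestamp:    true,
		TimestampFormat:  "2006-01-02 15:04:05",
		QuoteEmptyFields: true,
	}

	// WithField returns an *Entry that carries structured context.
	log.WithField("bucket", "example").Info("formatter configured")

	// WriterLevel (defined in writer.go below) adapts an Entry into an
	// *io.PipeWriter, so code expecting a plain io.Writer can still log
	// through logrus at a fixed level; closing it stops the scanner goroutine.
	w := log.WithField("source", "stdlib").WriterLevel(logrus.WarnLevel)
	defer w.Close()
	w.Write([]byte("a line routed through the pipe writer\n"))
}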
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..7bdebedc6 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,62 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/pengsrc/go-shared/.gitignore b/vendor/github.com/pengsrc/go-shared/.gitignore new file mode 100644 index 000000000..b1a2f490a --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +vendor + +coverage diff --git a/vendor/github.com/pengsrc/go-shared/.travis.yml b/vendor/github.com/pengsrc/go-shared/.travis.yml new file mode 100644 index 000000000..03f3066bb --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/.travis.yml @@ -0,0 +1,35 @@ +language: go +go: + - 1.8 + - 1.7 + - 1.6 + +cache: + directories: + - ${HOME}/source + +before_install: + - pushd ${HOME}/source + - if [[ ! -d "./make-4.0" ]]; then + wget http://ftp.gnu.org/gnu/make/make-4.0.tar.gz && + tar -vxzf make-4.0.tar.gz && + pushd make-4.0 && ./configure --prefix=/usr/local && make && popd; + fi + - pushd make-4.0 && sudo make install && popd + - if [[ ! 
-d "./glide-v0.12.3" ]]; then + wget https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-linux-amd64.tar.gz && + tar -vxzf glide-v0.12.3-linux-amd64.tar.gz && + mv linux-amd64 glide-v0.12.3; + fi + - pushd glide-v0.12.3 && sudo cp glide /usr/local/bin && popd + - popd + - /usr/local/bin/make --version + - /usr/local/bin/glide --version + +install: + - go get -u github.com/golang/lint/golint + - glide install + +script: + - /usr/local/bin/make check + - /usr/local/bin/make test-coverage diff --git a/vendor/github.com/pengsrc/go-shared/LICENSE b/vendor/github.com/pengsrc/go-shared/LICENSE new file mode 100644 index 000000000..99e1d3548 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 Jingwen Peng + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/pengsrc/go-shared/Makefile b/vendor/github.com/pengsrc/go-shared/Makefile new file mode 100644 index 000000000..016e12546 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/Makefile @@ -0,0 +1,61 @@ +SHELL := /bin/bash + +PACKAGE_NAME="github.com/pengsrc/go-shared" + +DIRS_TO_CHECK=$(shell ls -d */ | grep -v "vendor") +PKGS_TO_CHECK=$(shell go list ./... | grep -vE "/vendor") + +ifneq (${PKG},) + DIRS_TO_CHECK="./${PKG}" + PKGS_TO_CHECK="${PACKAGE_NAME}/${PKG}" +endif + +.PHONY: help +help: + @echo "Please use \`make \` where is one of" + @echo " check to vet and lint" + @echo " test to run test" + @echo " test-coverage to run test with coverage" + +.PHONY: check +check: format vet lint + +.PHONY: format +format: + @gofmt -w . + @echo "ok" + +.PHONY: vet +vet: + @echo "go tool vet, skipping vendor packages" + @go tool vet -all ${DIRS_TO_CHECK} + @echo "ok" + +.PHONY: lint +lint: + @echo "golint, skipping vendor packages" + @lint=$$(for pkg in ${PKGS_TO_CHECK}; do golint $${pkg}; done); \ + lint=$$(echo "$${lint}"); \ + if [[ -n $${lint} ]]; then echo "$${lint}"; exit 1; fi + @echo "ok" + +.PHONY: update +.PHONY: test +test: + @echo "run test" + @go test -v ${PKGS_TO_CHECK} + @echo "ok" + +.PHONY: test-coverage +test-coverage: + @echo "run test with coverage" + @for pkg in ${PKGS_TO_CHECK}; do \ + output="coverage$${pkg#${PACKAGE_NAME}}"; \ + mkdir -p $${output}; \ + go test -v -cover -coverprofile="$${output}/profile.out" $${pkg}; \ + if [[ -e "$${output}/profile.out" ]]; then \ + go tool cover -html="$${output}/profile.out" \ + -o "$${output}/profile.html"; \ + fi; \ + done + @echo "ok" diff --git a/vendor/github.com/pengsrc/go-shared/README.md b/vendor/github.com/pengsrc/go-shared/README.md new file mode 100644 index 000000000..7eeb25d0b --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/README.md @@ -0,0 +1,19 @@ +# go-shared + +[![Build Status](https://travis-ci.org/pengsrc/go-shared.svg?branch=master)](https://travis-ci.org/pengsrc/go-shared) +[![Go Report Card](https://goreportcard.com/badge/github.com/pengsrc/go-shared)](https://goreportcard.com/report/github.com/pengsrc/go-shared) +[![License](http://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/yunify/qingstor-sdk-go/blob/master/LICENSE) + +Useful packages for the Go programming language. + +## Contributing + +1. 
Fork it ( https://github.com/pengsrc/go-shared/fork ) +2. Create your feature branch (`git checkout -b new-feature`) +3. Commit your changes (`git commit -asm 'Add some feature'`) +4. Push to the branch (`git push origin new-feature`) +5. Create a new Pull Request + +## LICENSE + +The Apache License (Version 2.0, January 2004). diff --git a/vendor/github.com/pengsrc/go-shared/check/error.go b/vendor/github.com/pengsrc/go-shared/check/error.go new file mode 100644 index 000000000..88edfce71 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/error.go @@ -0,0 +1,21 @@ +package check + +import ( + "fmt" + "os" +) + +// ErrorForExit check the error. +// If error is not nil, print the error message and exit the application. +// If error is nil, do nothing. +func ErrorForExit(name string, err error, code ...int) { + if err != nil { + exitCode := 1 + if len(code) > 0 { + exitCode = code[0] + } + fmt.Fprintf(os.Stderr, "%s: %s (%d)\n", name, err.Error(), exitCode) + fmt.Fprintf(os.Stderr, "See \"%s --help\".\n", name) + os.Exit(exitCode) + } +} diff --git a/vendor/github.com/pengsrc/go-shared/check/error_test.go b/vendor/github.com/pengsrc/go-shared/check/error_test.go new file mode 100644 index 000000000..0f1e8a74e --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/error_test.go @@ -0,0 +1,12 @@ +package check + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCheckErrorForExit(t *testing.T) { + ErrorForExit("name", nil) + assert.True(t, true) +} diff --git a/vendor/github.com/pengsrc/go-shared/check/host.go b/vendor/github.com/pengsrc/go-shared/check/host.go new file mode 100644 index 000000000..ad789fb7f --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/host.go @@ -0,0 +1,11 @@ +package check + +import ( + "regexp" +) + +// HostAndPort checks whether a string contains host and port. +// It returns true if matched. +func HostAndPort(hostAndPort string) bool { + return regexp.MustCompile(`^[^:]+:[0-9]+$`).MatchString(hostAndPort) +} diff --git a/vendor/github.com/pengsrc/go-shared/check/host_test.go b/vendor/github.com/pengsrc/go-shared/check/host_test.go new file mode 100644 index 000000000..60f562c8b --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/host_test.go @@ -0,0 +1,16 @@ +package check + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCheckHostAndPort(t *testing.T) { + assert.False(t, HostAndPort("127.0.0.1:80:90")) + assert.False(t, HostAndPort("127.0.0.1")) + assert.False(t, HostAndPort("mysql")) + assert.False(t, HostAndPort("mysql:mysql")) + assert.True(t, HostAndPort("mysql:3306")) + assert.True(t, HostAndPort("172.16.70.50:6379")) +} diff --git a/vendor/github.com/pengsrc/go-shared/check/slice.go b/vendor/github.com/pengsrc/go-shared/check/slice.go new file mode 100644 index 000000000..8c98815c2 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/slice.go @@ -0,0 +1,41 @@ +package check + +// StringSliceContains iterates over the slice to find the target. +func StringSliceContains(slice []string, target string) bool { + for _, v := range slice { + if v == target { + return true + } + } + return false +} + +// IntSliceContains iterates over the slice to find the target. +func IntSliceContains(slice []int, target int) bool { + for _, v := range slice { + if v == target { + return true + } + } + return false +} + +// Int32SliceContains iterates over the slice to find the target. 
+func Int32SliceContains(slice []int32, target int32) bool { + for _, v := range slice { + if v == target { + return true + } + } + return false +} + +// Int64SliceContains iterates over the slice to find the target. +func Int64SliceContains(slice []int64, target int64) bool { + for _, v := range slice { + if v == target { + return true + } + } + return false +} diff --git a/vendor/github.com/pengsrc/go-shared/check/slice_test.go b/vendor/github.com/pengsrc/go-shared/check/slice_test.go new file mode 100644 index 000000000..92704ca65 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/check/slice_test.go @@ -0,0 +1,27 @@ +package check + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStringSliceContains(t *testing.T) { + assert.True(t, StringSliceContains([]string{"1", "2", "3"}, "2")) + assert.False(t, StringSliceContains([]string{"1", "2", "3"}, "4")) +} + +func TestIntSliceContains(t *testing.T) { + assert.True(t, IntSliceContains([]int{1, 2, 3, 4, 5, 6}, 4)) + assert.False(t, IntSliceContains([]int{1, 2, 3, 4, 5, 6}, 7)) +} + +func TestInt32SliceContains(t *testing.T) { + assert.True(t, Int32SliceContains([]int32{1, 2, 3, 4, 5, 6}, 4)) + assert.False(t, Int32SliceContains([]int32{1, 2, 3, 4, 5, 6}, 7)) +} + +func TestInt64SliceContains(t *testing.T) { + assert.True(t, Int64SliceContains([]int64{1, 2, 3, 4, 5, 6}, 4)) + assert.False(t, Int64SliceContains([]int64{1, 2, 3, 4, 5, 6}, 7)) +} diff --git a/vendor/github.com/pengsrc/go-shared/convert/time.go b/vendor/github.com/pengsrc/go-shared/convert/time.go new file mode 100644 index 000000000..e370e5af3 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/convert/time.go @@ -0,0 +1,56 @@ +package convert + +import ( + "fmt" + "time" + + "github.com/pengsrc/go-shared/check" +) + +// Supported time layouts +const ( + RFC822 = "Mon, 02 Jan 2006 15:04:05 GMT" + ISO8601 = "2006-01-02T15:04:05Z" + ISO8601Milli = "2006-01-02T15:04:05.000Z" + NGINXTime = "02/Jan/2006:15:04:05 -0700" +) + +// TimeToString transforms given time to string. +func TimeToString(timeValue time.Time, format string) string { + if check.StringSliceContains([]string{RFC822, ISO8601, ISO8601Milli}, format) { + timeValue = timeValue.UTC() + } + return timeValue.Format(format) +} + +// StringToTime transforms given string to time. +func StringToTime(timeString string, format string) (time.Time, error) { + result, err := time.Parse(format, timeString) + if timeString != "0001-01-01T00:00:00Z" { + zero := time.Time{} + if result == zero { + err = fmt.Errorf(`failed to parse "%s" like "%s"`, timeString, format) + } + } + return result, err +} + +// TimeToTimestamp transforms given time to unix time int. +func TimeToTimestamp(t time.Time) int64 { + return t.Unix() +} + +// TimestampToTime transforms given unix time int64 to time in UTC. +func TimestampToTime(unix int64) time.Time { + return time.Unix(unix, 0).UTC() +} + +// StringToUnixTimestamp transforms given string to unix time int64. It will +// return -1 when time string parse error. 
+func StringToUnixTimestamp(timeString string, format string) int64 { + t, err := StringToTime(timeString, format) + if err != nil { + return -1 + } + return t.Unix() +} diff --git a/vendor/github.com/pengsrc/go-shared/convert/time_test.go b/vendor/github.com/pengsrc/go-shared/convert/time_test.go new file mode 100644 index 000000000..14a8905a3 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/convert/time_test.go @@ -0,0 +1,78 @@ +package convert + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTimeToString(t *testing.T) { + tz, err := time.LoadLocation("Asia/Shanghai") + assert.NoError(t, err) + + someTime := time.Date(2016, 9, 1, 15, 30, 0, 500000000, tz) + assert.Equal(t, "Thu, 01 Sep 2016 07:30:00 GMT", TimeToString(someTime, RFC822)) + assert.Equal(t, "2016-09-01T07:30:00Z", TimeToString(someTime, ISO8601)) + assert.Equal(t, "2016-09-01T07:30:00.500Z", TimeToString(someTime, ISO8601Milli)) + assert.Equal(t, "01/Sep/2016:15:30:00 +0800", TimeToString(someTime, NGINXTime)) + assert.Equal(t, "01/Sep/2016:07:30:00 +0000", TimeToString(someTime.UTC(), NGINXTime)) +} + +func TestStringToTime(t *testing.T) { + tz, err := time.LoadLocation("Asia/Shanghai") + assert.NoError(t, err) + someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz) + + parsedTime, err := StringToTime("Thu, 01 Sep 2016 07:30:00 GMT", RFC822) + assert.NoError(t, err) + assert.Equal(t, someTime.UTC(), parsedTime) + + parsedTime, err = StringToTime("2016-09-01T07:30:00Z", ISO8601) + assert.NoError(t, err) + assert.Equal(t, someTime.UTC(), parsedTime) + + parsedTime, err = StringToTime("1472715000", ISO8601) + assert.Error(t, err) + assert.Equal(t, time.Time{}, parsedTime) + + someTime = time.Date(2016, 9, 1, 15, 30, 0, 500000000, tz) + parsedTime, err = StringToTime("2016-09-01T07:30:00.500Z", ISO8601Milli) + assert.NoError(t, err) + assert.Equal(t, someTime.UTC(), parsedTime) + someTime = time.Date(2016, 9, 1, 15, 30, 0, 0, tz) + + parsedTime, err = StringToTime("01/Sep/2016:15:30:00 +0800", NGINXTime) + assert.NoError(t, err) + assert.Equal(t, someTime.UTC(), parsedTime.UTC()) + + parsedTime, err = StringToTime("01/Sep/2016:07:30:00 +0000", NGINXTime) + assert.NoError(t, err) + assert.Equal(t, someTime.UTC(), parsedTime.UTC()) +} + +func TestStringToUnixString(t *testing.T) { + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("Thu, 01 Sep 2016 07:30:00 GMT", RFC822)) + assert.Equal(t, int64(-1), StringToUnixTimestamp("2016-09-01T07:30:00.000Z", RFC822)) + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00Z", ISO8601)) + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00.000Z", ISO8601Milli)) + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00.500Z", ISO8601Milli)) + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("01/Sep/2016:15:30:00 +0800", NGINXTime)) + assert.Equal(t, int64(1472715000), StringToUnixTimestamp("01/Sep/2016:07:30:00 +0000", NGINXTime)) +} + +func TestTimeToUnixInt(t *testing.T) { + tz, err := time.LoadLocation("Asia/Shanghai") + assert.NoError(t, err) + someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz) + + assert.Equal(t, int64(1472715000), TimeToTimestamp(someTime)) +} + +func TestUnixIntToTime(t *testing.T) { + tz, err := time.LoadLocation("Asia/Shanghai") + assert.NoError(t, err) + someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz) + + assert.Equal(t, someTime.UTC(), TimestampToTime(1472715000)) +} diff --git 
a/vendor/github.com/pengsrc/go-shared/convert/types.go b/vendor/github.com/pengsrc/go-shared/convert/types.go new file mode 100644 index 000000000..bb451282d --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/convert/types.go @@ -0,0 +1,477 @@ +package convert + +import ( + "time" +) + +// String returns a pointer to the given string value. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the given string pointer or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the given bool value. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the given bool pointer or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the given int value. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the given int pointer or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the given int32 value. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the given int32 pointer or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the given int64 value. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the given int64 pointer or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the given float32 value. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the given float32 pointer or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the given float64 value. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the given float64 pointer or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the given time.Time value. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the given time.Time pointer or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/pengsrc/go-shared/convert/types_test.go b/vendor/github.com/pengsrc/go-shared/convert/types_test.go new file mode 100644 index 000000000..9ccab2ac6 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/convert/types_test.go @@ -0,0 +1,635 @@ +package convert + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestStringValue(t *testing.T) { + s := "string" + assert.Equal(t, s, StringValue(String(s))) + + assert.Equal(t, "", StringValue(nil)) +} + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := 
StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestBoolValue(t *testing.T) { + b := true + assert.Equal(t, b, BoolValue(Bool(b))) + + assert.Equal(t, false, BoolValue(nil)) +} + +var testCasesBoolSlice = [][]bool{ + {true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap { + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestIntValue(t *testing.T) { + i := 1024 + assert.Equal(t, i, 
IntValue(Int(i))) + + assert.Equal(t, 0, IntValue(nil)) +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestInt64Value(t *testing.T) { + i := int64(1024) + assert.Equal(t, i, Int64Value(Int64(i))) + + assert.Equal(t, int64(0), Int64Value(nil)) +} + +var testCasesInt64Slice = [][]int64{ + {1, 2, 3, 4}, +} + +func TestInt64Slice(t *testing.T) { + for idx, in := range testCasesInt64Slice { + if in == nil { + continue + } + out := Int64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt64ValueSlice = [][]*int64{} + +func TestInt64ValueSlice(t *testing.T) { + for idx, in := range testCasesInt64ValueSlice { + if in == nil { + continue + } + out := Int64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt64Map = []map[string]int64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt64Map(t *testing.T) { + for idx, in := range testCasesInt64Map { + if in == nil { + continue + } + out := Int64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + 
for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestInt32Value(t *testing.T) { + i := int32(1024) + assert.Equal(t, i, Int32Value(Int32(i))) + + assert.Equal(t, int32(0), Int32Value(nil)) +} + +var testCasesInt32Slice = [][]int32{ + {1, 2, 3, 4}, +} + +func TestInt32Slice(t *testing.T) { + for idx, in := range testCasesInt32Slice { + if in == nil { + continue + } + out := Int32Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int32ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesInt32ValueSlice = [][]*int32{} + +func TestInt32ValueSlice(t *testing.T) { + for idx, in := range testCasesInt32ValueSlice { + if in == nil { + continue + } + out := Int32ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Int32Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesInt32Map = []map[string]int32{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestInt32Map(t *testing.T) { + for idx, in := range testCasesInt32Map { + if in == nil { + continue + } + out := Int32Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Int32ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestFloat64Value(t *testing.T) { + i := float64(1024) + assert.Equal(t, i, Float64Value(Float64(i))) + + assert.Equal(t, float64(0), Float64Value(nil)) +} + +var testCasesFloat64Slice = [][]float64{ + {1, 2, 3, 4}, +} + +func TestFloat64Slice(t *testing.T) { + for idx, in := range testCasesFloat64Slice { + if in == nil { + continue + } + out := Float64Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat64ValueSlice = [][]*float64{} + +func TestFloat64ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat64ValueSlice { + if in == nil { + continue + } + out := Float64ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float64Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + 
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat64Map = []map[string]float64{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat64Map(t *testing.T) { + for idx, in := range testCasesFloat64Map { + if in == nil { + continue + } + out := Float64Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float64ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestFloat32Value(t *testing.T) { + i := float32(1024) + assert.Equal(t, i, Float32Value(Float32(i))) + + assert.Equal(t, float32(0), Float32Value(nil)) +} + +var testCasesFloat32Slice = [][]float32{ + {1, 2, 3, 4}, +} + +func TestFloat32Slice(t *testing.T) { + for idx, in := range testCasesFloat32Slice { + if in == nil { + continue + } + out := Float32Slice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float32ValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesFloat32ValueSlice = [][]*float32{} + +func TestFloat32ValueSlice(t *testing.T) { + for idx, in := range testCasesFloat32ValueSlice { + if in == nil { + continue + } + out := Float32ValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := Float32Slice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesFloat32Map = []map[string]float32{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestFloat32Map(t *testing.T) { + for idx, in := range testCasesFloat32Map { + if in == nil { + continue + } + out := Float32Map(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := Float32ValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +func TestTimeValue(t *testing.T) { + tm := time.Time{} + assert.Equal(t, tm, TimeValue(Time(tm))) + + assert.Equal(t, time.Time{}, TimeValue(nil)) +} + +var testCasesTimeSlice = [][]time.Time{ + {time.Now(), time.Now().AddDate(100, 0, 0)}, +} + +func TestTimeSlice(t *testing.T) { + for idx, in := range testCasesTimeSlice { + if in == nil { + continue + } + out := TimeSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeValueSlice = [][]*time.Time{} + +func TestTimeValueSlice(t *testing.T) { + for idx, in := range 
testCasesTimeValueSlice { + if in == nil { + continue + } + out := TimeValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := TimeSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesTimeMap = []map[string]time.Time{ + {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, +} + +func TestTimeMap(t *testing.T) { + for idx, in := range testCasesTimeMap { + if in == nil { + continue + } + out := TimeMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} diff --git a/vendor/github.com/pengsrc/go-shared/glide.lock b/vendor/github.com/pengsrc/go-shared/glide.lock new file mode 100644 index 000000000..3a73f187b --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/glide.lock @@ -0,0 +1,28 @@ +hash: d9fb8784d4c53a209ba12b5be1dc51d43b925ff53a06b5df850a9f7afa5e3e08 +updated: 2017-04-11T15:44:30.513321945+08:00 +imports: +- name: github.com/davecgh/go-spew + version: 346938d642f2ec3594ed81d874461961cd0faa76 + subpackages: + - spew +- name: github.com/Jeffail/gabs + version: 2a3aa15961d5fee6047b8151b67ac2f08ba2c48c +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/Sirupsen/logrus + version: d26492970760ca5d33129d2d799e34be5c4782eb +- name: github.com/stretchr/testify + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + subpackages: + - assert +- name: golang.org/x/sys + version: f3918c30c5c2cb527c0b071a27c35120a6c0719a + repo: https://github.com/golang/sys.git + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: a5b47d31c556af34a302ce5d659e6fea44d90de0 + repo: https://github.com/go-yaml/yaml.git +testImports: [] diff --git a/vendor/github.com/pengsrc/go-shared/glide.yaml b/vendor/github.com/pengsrc/go-shared/glide.yaml new file mode 100644 index 000000000..63a466c4a --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/glide.yaml @@ -0,0 +1,23 @@ +package: github.com/pengsrc/go-shared +import: +# Test +- package: github.com/stretchr/testify + version: v1.1.4 +- package: github.com/davecgh/go-spew + version: v1.1.0 +- package: github.com/pmezard/go-difflib + version: v1.0.0 +# JSON +- package: github.com/Jeffail/gabs + version: 1.0 +# YAML +- package: gopkg.in/yaml.v2 + version: a5b47d31c556af34a302ce5d659e6fea44d90de0 + repo: https://github.com/go-yaml/yaml.git +# Logging +- package: github.com/Sirupsen/logrus + version: v0.11.0 +# GoLang +- package: golang.org/x/sys + version: f3918c30c5c2cb527c0b071a27c35120a6c0719a + repo: https://github.com/golang/sys.git diff --git a/vendor/github.com/pengsrc/go-shared/json/json.go b/vendor/github.com/pengsrc/go-shared/json/json.go new file mode 100644 index 000000000..4538e84f9 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/json/json.go @@ -0,0 +1,50 @@ +package json + +import ( + "bytes" + "encoding/json" +) + +// 
Encode encode given interface to json byte slice. +func Encode(source interface{}, unescape bool) ([]byte, error) { + bytesResult, err := json.Marshal(source) + if err != nil { + return []byte{}, err + } + + if unescape { + bytesResult = bytes.Replace(bytesResult, []byte("\\u003c"), []byte("<"), -1) + bytesResult = bytes.Replace(bytesResult, []byte("\\u003e"), []byte(">"), -1) + bytesResult = bytes.Replace(bytesResult, []byte("\\u0026"), []byte("&"), -1) + } + + return bytesResult, nil +} + +// Decode decode given json byte slice to corresponding struct. +func Decode(content []byte, destinations ...interface{}) (interface{}, error) { + var destination interface{} + var err error + if len(destinations) == 1 { + destination = destinations[0] + err = json.Unmarshal(content, destination) + } else { + err = json.Unmarshal(content, &destination) + } + + if err != nil { + return nil, err + } + return destination, err +} + +// FormatToReadable formats given json byte slice prettily. +func FormatToReadable(source []byte) ([]byte, error) { + var out bytes.Buffer + err := json.Indent(&out, source, "", " ") // Using 2 space indent + if err != nil { + return []byte{}, err + } + + return out.Bytes(), nil +} diff --git a/vendor/github.com/pengsrc/go-shared/json/json_test.go b/vendor/github.com/pengsrc/go-shared/json/json_test.go new file mode 100644 index 000000000..5d7b2b0fd --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/json/json_test.go @@ -0,0 +1,79 @@ +package json + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestJSONDecodeUnknown(t *testing.T) { + jsonString := `{ + "key1" : "This is a string.", + "key2" : 10.50, + "key3": [null, {"nestedKey1": "Another string"}] + }` + + anyData, err := Decode([]byte(jsonString)) + assert.NoError(t, err) + data := anyData.(map[string]interface{}) + assert.Equal(t, 10.50, data["key2"]) + + var anotherData interface{} + _, err = Decode([]byte(jsonString), &anotherData) + assert.NoError(t, err) + data = anyData.(map[string]interface{}) + assert.Equal(t, 10.50, data["key2"]) + + _, err = Decode([]byte(`- - -`), &JSONMustError{}) + assert.Error(t, err) +} + +func TestJSONDecodeKnown(t *testing.T) { + type SampleJSON struct { + Name string `json:"name"` + Description string `json:"description"` + } + sampleJSONString := `{"name": "NAME"}` + + sample := SampleJSON{Name: "NaMe", Description: "DeScRiPtIoN"} + anyDataPointer, err := Decode([]byte(sampleJSONString), &sample) + assert.NoError(t, err) + data := anyDataPointer.(*SampleJSON) + assert.Equal(t, "NAME", sample.Name) + assert.Equal(t, "DeScRiPtIoN", sample.Description) + assert.Equal(t, "NAME", (*data).Name) + assert.Equal(t, "DeScRiPtIoN", (*data).Description) +} + +func TestJSONEncode(t *testing.T) { + type SampleJSON struct { + Name string `json:"name"` + Description string `json:"description"` + } + sample := SampleJSON{Name: "NaMe", Description: "DeScRiPtIoN"} + + jsonBytes, err := Encode(sample, true) + assert.NoError(t, err) + assert.Equal(t, `{"name":"NaMe","description":"DeScRiPtIoN"}`, string(jsonBytes)) + + _, err = Encode(&JSONMustError{}, true) + assert.Error(t, err) +} + +func TestJSONFormatToReadable(t *testing.T) { + sampleJSONString := `{"name": "NAME"}` + + jsonBytes, err := FormatToReadable([]byte(sampleJSONString)) + assert.NoError(t, err) + assert.Equal(t, "{\n \"name\": \"NAME\"\n}", string(jsonBytes)) + + _, err = FormatToReadable([]byte(`XXXXX`)) + assert.Error(t, err) +} + +type JSONMustError struct{} + +func (*JSONMustError) MarshalJSON() 
([]byte, error) { + return []byte{}, errors.New("marshal error") +} diff --git a/vendor/github.com/pengsrc/go-shared/logger/logger.go b/vendor/github.com/pengsrc/go-shared/logger/logger.go new file mode 100644 index 000000000..85481a332 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/logger/logger.go @@ -0,0 +1,358 @@ +// Package logger provides support for logging to stdout and stderr. +package logger + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + "path" + "strings" + "syscall" + "time" + + log "github.com/Sirupsen/logrus" + + "github.com/pengsrc/go-shared/convert" + "github.com/pengsrc/go-shared/reopen" +) + +// LogFormatter is used to format log entry. +type LogFormatter struct{} + +// Format formats a given log entry, returns byte slice and error. +func (c *LogFormatter) Format(entry *log.Entry) ([]byte, error) { + level := strings.ToUpper(entry.Level.String()) + if level == "WARNING" { + level = "WARN" + } + if len(level) < 5 { + level = strings.Repeat(" ", 5-len(level)) + level + } + + return []byte( + fmt.Sprintf( + "[%s #%d] %s -- : %s\n", + convert.TimeToString(time.Now(), convert.ISO8601Milli), + os.Getpid(), + level, + entry.Message, + ), + ), nil +} + +// NewLogFormatter creates a new log formatter. +func NewLogFormatter() *LogFormatter { + return &LogFormatter{} +} + +// ErrorHook presents error hook. +type ErrorHook struct { + levels []log.Level + + out io.Writer + formatter log.Formatter +} + +// Levels returns error log levels. +func (eh *ErrorHook) Levels() []log.Level { + return eh.levels +} + +// Fire triggers before logging. +func (eh *ErrorHook) Fire(entry *log.Entry) error { + formatted, err := eh.formatter.Format(entry) + if err != nil { + return err + } + _, err = eh.out.Write(formatted) + if err != nil { + return err + } + return nil +} + +// NewErrorHook creates new error hook. +func NewErrorHook(out io.Writer) *ErrorHook { + return &ErrorHook{ + levels: []log.Level{ + log.WarnLevel, + log.ErrorLevel, + log.FatalLevel, + log.PanicLevel, + }, + out: out, + formatter: NewLogFormatter(), + } +} + +// Logger presents a logger. +type Logger struct { + origLogger *log.Logger + + out io.Writer + errOut io.Writer + + bufferedOut Flusher + bufferedErrOut Flusher +} + +// Flusher defines a interface with Flush() method. +type Flusher interface { + Flush() +} + +// GetLevel get the log level string. +func (l *Logger) GetLevel() string { + return l.origLogger.Level.String() +} + +// SetLevel sets the log level. Valid levels are "debug", "info", "warn", "error", and "fatal". +func (l *Logger) SetLevel(level string) { + lvl, err := log.ParseLevel(level) + if err != nil { + l.Fatal(fmt.Sprintf(`log level not valid: "%s"`, level)) + } + l.origLogger.Level = lvl +} + +// Flush writes buffered logs. +func (l *Logger) Flush() { + if l.bufferedOut != nil { + l.bufferedOut.Flush() + } + if l.bufferedErrOut != nil { + l.bufferedErrOut.Flush() + } +} + +// Debug logs a message with severity DEBUG. +func (l *Logger) Debug(message string) { + l.output(l.origLogger.Debug, message) +} + +// Info logs a message with severity INFO. +func (l *Logger) Info(message string) { + l.output(l.origLogger.Info, message) +} + +// Warn logs a message with severity WARN. +func (l *Logger) Warn(message string) { + l.output(l.origLogger.Warn, message) +} + +// Error logs a message with severity ERROR. +func (l *Logger) Error(message string) { + l.output(l.origLogger.Error, message) +} + +// Fatal logs a message with severity ERROR followed by a call to os.Exit(). 
+func (l *Logger) Fatal(message string) { + l.output(l.origLogger.Fatal, message) +} + +// Debugf logs a message with severity DEBUG in format. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.output(l.origLogger.Debug, format, v...) +} + +// Infof logs a message with severity INFO in format. +func (l *Logger) Infof(format string, v ...interface{}) { + l.output(l.origLogger.Info, format, v...) +} + +// Warnf logs a message with severity WARN in format. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.output(l.origLogger.Warn, format, v...) +} + +// Errorf logs a message with severity ERROR in format. +func (l *Logger) Errorf(format string, v ...interface{}) { + l.output(l.origLogger.Error, format, v...) +} + +// Fatalf logs a message with severity ERROR in format followed by a call to +// os.Exit(). +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.output(l.origLogger.Fatal, format, v...) +} + +func (l *Logger) output(origin func(...interface{}), formatOrMessage string, v ...interface{}) { + if len(v) > 0 { + origin(fmt.Sprintf(formatOrMessage, v...)) + } else { + origin(formatOrMessage) + } +} + +// CheckLevel checks whether the log level is valid. +func CheckLevel(level string) error { + if _, err := log.ParseLevel(level); err != nil { + return fmt.Errorf(`log level not valid: "%s"`, level) + } + return nil +} + +// NewFileLogger creates a logger that write into file. +func NewFileLogger(filePath string, level ...string) (*Logger, error) { + return NewFileLoggerWithErr(filePath, "", level...) +} + +// NewFileLoggerWithErr creates a logger that write into files. +func NewFileLoggerWithErr(filePath, errFilePath string, level ...string) (*Logger, error) { + if err := checkDir(path.Dir(filePath)); err != nil { + return nil, err + } + if errFilePath != "" { + if err := checkDir(path.Dir(errFilePath)); err != nil { + return nil, err + } + } + + out, err := reopen.NewFileWriter(filePath) + if err != nil { + return nil, err + } + var errOut *reopen.FileWriter + if errFilePath != "" { + errOut, err = reopen.NewFileWriter(errFilePath) + if err != nil { + return nil, err + } + } + + c := make(chan os.Signal) + go func() { + for { + select { + case <-c: + out.Reopen() + if errOut != nil { + errOut.Reopen() + } + } + } + }() + signal.Notify(c, syscall.SIGHUP) + + if errOut == nil { + return NewLoggerWithErr(out, nil, level...) + } + return NewLoggerWithErr(out, errOut, level...) +} + +// NewBufferedFileLogger creates a logger that write into file with buffer. +func NewBufferedFileLogger(filePath string, level ...string) (*Logger, error) { + return NewBufferedFileLoggerWithErr(filePath, "", level...) +} + +// NewBufferedFileLoggerWithErr creates a logger that write into files with buffer. 
+func NewBufferedFileLoggerWithErr(filePath, errFilePath string, level ...string) (*Logger, error) { + if err := checkDir(path.Dir(filePath)); err != nil { + return nil, err + } + if errFilePath != "" { + if err := checkDir(path.Dir(errFilePath)); err != nil { + return nil, err + } + } + + out, err := reopen.NewFileWriter(filePath) + if err != nil { + return nil, err + } + var errOut *reopen.FileWriter + if errFilePath != "" { + errOut, err = reopen.NewFileWriter(errFilePath) + if err != nil { + return nil, err + } + } + + bufferedOut := reopen.NewBufferedFileWriter(out) + var bufferedErrOut *reopen.BufferedFileWriter + if errOut != nil { + bufferedErrOut = reopen.NewBufferedFileWriter(errOut) + } + + c := make(chan os.Signal) + go func() { + for { + select { + case <-c: + bufferedOut.Reopen() + if bufferedErrOut != nil { + bufferedErrOut.Reopen() + } + case <-time.After(10 * time.Second): + bufferedOut.Flush() + if bufferedErrOut != nil { + bufferedErrOut.Flush() + } + } + } + }() + signal.Notify(c, syscall.SIGHUP) + + if bufferedErrOut == nil { + return NewLoggerWithErr(bufferedOut, nil, level...) + } + return NewLoggerWithErr(bufferedOut, bufferedErrOut, level...) +} + +// NewTerminalLogger creates a logger that write into terminal. +func NewTerminalLogger(level ...string) (*Logger, error) { + return NewLogger(os.Stdout, level...) +} + +// NewTerminalLoggerWithErr creates a logger that write into terminal. +func NewTerminalLoggerWithErr(level ...string) (*Logger, error) { + return NewLoggerWithErr(os.Stdout, os.Stderr, level...) +} + +// NewLogger creates a new logger for given out and level, and the level is +// optional. +func NewLogger(out io.Writer, level ...string) (*Logger, error) { + return NewLoggerWithErr(out, nil, level...) +} + +// NewLoggerWithErr creates a new logger for given out, err out, level, and the +// err out can be nil, and the level is optional. 
+func NewLoggerWithErr(out, errOut io.Writer, level ...string) (*Logger, error) { + if out == nil { + return nil, errors.New(`must specify the output for logger`) + } + l := &Logger{ + origLogger: &log.Logger{ + Out: out, + Formatter: NewLogFormatter(), + Hooks: log.LevelHooks{}, + Level: log.WarnLevel, + }, + out: out, + errOut: errOut, + } + + if errOut != nil { + l.origLogger.Hooks.Add(NewErrorHook(l.errOut)) + } + + if len(level) == 1 { + if err := CheckLevel(level[0]); err != nil { + return nil, err + } + l.SetLevel(level[0]) + } + + return l, nil +} + +func checkDir(dir string) error { + if info, err := os.Stat(dir); err != nil { + return fmt.Errorf(`directory not exists: %s`, dir) + } else if !info.IsDir() { + return fmt.Errorf(`path is not directory: %s`, dir) + } + return nil +} diff --git a/vendor/github.com/pengsrc/go-shared/logger/logger_test.go b/vendor/github.com/pengsrc/go-shared/logger/logger_test.go new file mode 100644 index 000000000..ddbc8fed1 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/logger/logger_test.go @@ -0,0 +1,218 @@ +package logger + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestCheckLevel(t *testing.T) { + assert.NoError(t, CheckLevel("warn")) + assert.Error(t, CheckLevel("invalid")) +} + +func TestSetAndGetLevel(t *testing.T) { + l, err := NewTerminalLogger() + assert.NoError(t, err) + + l.SetLevel("error") + assert.Equal(t, "error", l.GetLevel()) +} + +func TestNewFileLogger(t *testing.T) { + logFile := "/tmp/logger-test/test.log" + dir := path.Dir(logFile) + err := os.MkdirAll(dir, 0775) + assert.NoError(t, err) + defer os.RemoveAll(dir) + + l, err := NewFileLogger(logFile, "debug") + assert.NoError(t, err) + + l.Debug("file - debug") + l.Info("file - info") + l.Warn("file - warn") + l.Error("file - error") + + log, err := ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 5, len(strings.Split(string(log), "\n"))) + + // Move log file. + movedLogFile := fmt.Sprintf(`%s.move`, logFile) + os.Rename(logFile, movedLogFile) + + l.Error("file - error") + + log, err = ioutil.ReadFile(movedLogFile) + assert.NoError(t, err) + assert.Equal(t, 6, len(strings.Split(string(log), "\n"))) + + // Reopen. + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + time.Sleep(10 * time.Millisecond) + + l.Warn("file - warn") + l.Error("file - error") + + log, err = ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 3, len(strings.Split(string(log), "\n"))) +} + +func TestNewFileLoggerWithWf(t *testing.T) { + logFile := "/tmp/logger-test/test.log" + errLogFile := "/tmp/logger-test/test.log.wf" + dir := path.Dir(logFile) + err := os.MkdirAll(dir, 0775) + assert.NoError(t, err) + defer os.RemoveAll(dir) + + l, err := NewFileLoggerWithErr(logFile, errLogFile, "debug") + assert.NoError(t, err) + + l.Debug("file - debug") + l.Info("file - info") + l.Warn("file - warn") + l.Error("file - error") + + log, err := ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 5, len(strings.Split(string(log), "\n"))) + + errLog, err := ioutil.ReadFile(errLogFile) + assert.NoError(t, err) + assert.Equal(t, 3, len(strings.Split(string(errLog), "\n"))) + + // Move log file. 
+ movedLogFile := fmt.Sprintf(`%s.move`, logFile) + os.Rename(logFile, movedLogFile) + + movedErrLogFile := fmt.Sprintf(`%s.move`, errLogFile) + os.Rename(errLogFile, movedErrLogFile) + + l.Error("file - error") + + log, err = ioutil.ReadFile(movedLogFile) + assert.NoError(t, err) + assert.Equal(t, 6, len(strings.Split(string(log), "\n"))) + + errLog, err = ioutil.ReadFile(movedErrLogFile) + assert.NoError(t, err) + assert.Equal(t, 4, len(strings.Split(string(errLog), "\n"))) + + // Reopen. + syscall.Kill(syscall.Getpid(), syscall.SIGHUP) + time.Sleep(10 * time.Millisecond) + + l.Warn("file - warn") + l.Error("file - error") + + log, err = ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 3, len(strings.Split(string(log), "\n"))) + + errLog, err = ioutil.ReadFile(errLogFile) + assert.NoError(t, err) + assert.Equal(t, 3, len(strings.Split(string(errLog), "\n"))) +} + +func TestBufferedFileLogger(t *testing.T) { + logFile := "/tmp/logger-test/test.log" + dir := path.Dir(logFile) + err := os.MkdirAll(dir, 0775) + assert.NoError(t, err) + defer os.RemoveAll(dir) + + l, err := NewBufferedFileLogger(logFile, "debug") + assert.NoError(t, err) + + l.Debug("file - debug") + l.Info("file - info") + l.Warn("file - warn") + l.Error("file - error") + + log, err := ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 1, len(strings.Split(string(log), "\n"))) + + // Wait timeout. + //time.Sleep(10*time.Second + 10*time.Millisecond) + // + //log, err = ioutil.ReadFile(logFile) + //assert.NoError(t, err) + //assert.Equal(t, 5, len(strings.Split(string(log), "\n"))) +} + +func TestBufferedFileLoggerWithErr(t *testing.T) { + logFile := "/tmp/logger-test/test.log" + errLogFile := "/tmp/logger-test/test.log.wf" + dir := path.Dir(logFile) + err := os.MkdirAll(dir, 0775) + assert.NoError(t, err) + defer os.RemoveAll(dir) + + errL, err := NewBufferedFileLoggerWithErr(logFile, errLogFile, "debug") + assert.NoError(t, err) + + errL.Debug("file - debug") + errL.Info("file - info") + errL.Warn("file - warn") + errL.Error("file - error") + + log, err := ioutil.ReadFile(logFile) + assert.NoError(t, err) + assert.Equal(t, 1, len(strings.Split(string(log), "\n"))) + + errLog, err := ioutil.ReadFile(errLogFile) + assert.NoError(t, err) + assert.Equal(t, 1, len(strings.Split(string(errLog), "\n"))) + + // Wait timeout. 
+ //time.Sleep(10*time.Second + 10*time.Millisecond) + // + //log, err = ioutil.ReadFile(logFile) + //assert.NoError(t, err) + //assert.Equal(t, 5, len(strings.Split(string(log), "\n"))) + // + //errLog, err = ioutil.ReadFile(errLogFile) + //assert.NoError(t, err) + //assert.Equal(t, 3, len(strings.Split(string(errLog), "\n"))) +} + +func TestTerminalLogger(t *testing.T) { + l, err := NewTerminalLogger("debug") + assert.NoError(t, err) + + l.Debug("terminal - debug") + l.Info("terminal - info") + l.Warn("terminal - warn") + l.Error("terminal - error") + + l.Debugf("terminal - debug - %d", time.Now().Unix()) + l.Infof("terminal - info - %d", time.Now().Unix()) + l.Warnf("terminal - warn - %d", time.Now().Unix()) + l.Errorf("terminal - error - %d", time.Now().Unix()) +} + +func TestTerminalLoggerWithErr(t *testing.T) { + errL, err := NewTerminalLoggerWithErr("debug") + assert.NoError(t, err) + + errL.Debug("terminal - debug - err") + errL.Info("terminal - info - err") + errL.Warn("terminal - warn - err") + errL.Error("terminal - error - err") + + errL.Debugf("terminal - debug - err - %d", time.Now().Unix()) + errL.Infof("terminal - info - err - %d", time.Now().Unix()) + errL.Warnf("terminal - warn - err - %d", time.Now().Unix()) + errL.Errorf("terminal - error - err - %d", time.Now().Unix()) +} diff --git a/vendor/github.com/pengsrc/go-shared/pid/pidfile.go b/vendor/github.com/pengsrc/go-shared/pid/pidfile.go new file mode 100644 index 000000000..e2d54746e --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/pid/pidfile.go @@ -0,0 +1,49 @@ +// Package pid provides structure and helper functions to create and remove +// PID file. A PID file is usually a file used to store the process ID of a +// running process. +package pid + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// File is a file used to store the process ID of a running process. +type File struct { + path string +} + +func checkPIDFileAlreadyExists(path string) error { + if pidByte, err := ioutil.ReadFile(path); err == nil { + pidString := strings.TrimSpace(string(pidByte)) + if pid, err := strconv.Atoi(pidString); err == nil { + if processExists(pid) { + return fmt.Errorf("pid file found, ensure server is not running or delete %s", path) + } + } + } + return nil +} + +// New creates a PID file using the specified path. +func New(path string) (*File, error) { + if err := checkPIDFileAlreadyExists(path); err != nil { + return nil, err + } + if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { + return nil, err + } + + return &File{path: path}, nil +} + +// Remove removes the File. +func (file File) Remove() error { + if err := os.Remove(file.path); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/pengsrc/go-shared/pid/pidfile_darwin.go b/vendor/github.com/pengsrc/go-shared/pid/pidfile_darwin.go new file mode 100644 index 000000000..85c43e479 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/pid/pidfile_darwin.go @@ -0,0 +1,14 @@ +// +build darwin + +package pid + +import ( + "syscall" +) + +func processExists(pid int) bool { + // OS X does not have a proc filesystem. + // Use kill -0 pid to judge if the process exists. 
+ err := syscall.Kill(pid, 0) + return err == nil +} diff --git a/vendor/github.com/pengsrc/go-shared/pid/pidfile_test.go b/vendor/github.com/pengsrc/go-shared/pid/pidfile_test.go new file mode 100644 index 000000000..a99cecf7f --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/pid/pidfile_test.go @@ -0,0 +1,38 @@ +package pid + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestNewAndRemove(t *testing.T) { + dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") + if err != nil { + t.Fatal("Could not create test directory") + } + + path := filepath.Join(dir, "testfile") + file, err := New(path) + if err != nil { + t.Fatal("Could not create test file", err) + } + + _, err = New(path) + if err == nil { + t.Fatal("Test file creation not blocked") + } + + if err := file.Remove(); err != nil { + t.Fatal("Could not delete created test file") + } +} + +func TestRemoveInvalidPath(t *testing.T) { + file := File{path: filepath.Join("foo", "bar")} + + if err := file.Remove(); err == nil { + t.Fatal("Non-existing file doesn't give an error on delete") + } +} diff --git a/vendor/github.com/pengsrc/go-shared/pid/pidfile_unix.go b/vendor/github.com/pengsrc/go-shared/pid/pidfile_unix.go new file mode 100644 index 000000000..7a99bd9e6 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/pid/pidfile_unix.go @@ -0,0 +1,16 @@ +// +build !windows,!darwin + +package pid + +import ( + "os" + "path/filepath" + "strconv" +) + +func processExists(pid int) bool { + if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/pengsrc/go-shared/pid/pidfile_windows.go b/vendor/github.com/pengsrc/go-shared/pid/pidfile_windows.go new file mode 100644 index 000000000..b0461df7a --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/pid/pidfile_windows.go @@ -0,0 +1,23 @@ +package pid + +import "syscall" + +const ( + processQueryLimitedInformation = 0x1000 + + stillActive = 259 +) + +func processExists(pid int) bool { + h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) + if err != nil { + return false + } + var c uint32 + err = syscall.GetExitCodeProcess(h, &c) + syscall.Close(h) + if err != nil { + return c == stillActive + } + return true +} diff --git a/vendor/github.com/pengsrc/go-shared/reopen/reopen.go b/vendor/github.com/pengsrc/go-shared/reopen/reopen.go new file mode 100644 index 000000000..a168b40de --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/reopen/reopen.go @@ -0,0 +1,176 @@ +package reopen + +import ( + "bufio" + "io" + "os" + "sync" + "time" +) + +// Reopener interface defines something that can be reopened. +type Reopener interface { + Reopen() error +} + +// Writer is a writer that also can be reopened. +type Writer interface { + Reopener + io.Writer +} + +// WriteCloser is a io.WriteCloser that can also be reopened. +type WriteCloser interface { + Reopener + io.WriteCloser +} + +// FileWriter that can also be reopened. +type FileWriter struct { + // Ensures close/reopen/write are not called at the same time, protects f + mu sync.Mutex + f *os.File + mode os.FileMode + name string +} + +// Close calls the under lying File.Close(). +func (f *FileWriter) Close() error { + f.mu.Lock() + err := f.f.Close() + f.mu.Unlock() + return err +} + +// Reopen the file. +func (f *FileWriter) Reopen() error { + f.mu.Lock() + err := f.reopen() + f.mu.Unlock() + return err +} + +// Write implements the stander io.Writer interface. 
+func (f *FileWriter) Write(p []byte) (int, error) { + f.mu.Lock() + n, err := f.f.Write(p) + f.mu.Unlock() + return n, err +} + +// reopen with mutex free. +func (f *FileWriter) reopen() error { + if f.f != nil { + f.f.Close() + f.f = nil + } + ff, err := os.OpenFile(f.name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, f.mode) + if err != nil { + f.f = nil + return err + } + f.f = ff + + return nil +} + +// NewFileWriter opens a file for appending and writing and can be reopened. +// It is a ReopenWriteCloser... +func NewFileWriter(name string) (*FileWriter, error) { + // Standard default mode + return NewFileWriterMode(name, 0644) +} + +// NewFileWriterMode opens a Reopener file with a specific permission. +func NewFileWriterMode(name string, mode os.FileMode) (*FileWriter, error) { + writer := FileWriter{ + f: nil, + name: name, + mode: mode, + } + err := writer.reopen() + if err != nil { + return nil, err + } + return &writer, nil +} + +// BufferedFileWriter is buffer writer than can be reopened. +type BufferedFileWriter struct { + mu sync.Mutex + OrigWriter *FileWriter + BufWriter *bufio.Writer +} + +// Reopen implement Reopener. +func (bw *BufferedFileWriter) Reopen() error { + bw.mu.Lock() + bw.BufWriter.Flush() + + // Use non-mutex version since we are using this one. + err := bw.OrigWriter.reopen() + + bw.BufWriter.Reset(io.Writer(bw.OrigWriter)) + bw.mu.Unlock() + + return err +} + +// Close flushes the internal buffer and closes the destination file. +func (bw *BufferedFileWriter) Close() error { + bw.mu.Lock() + bw.BufWriter.Flush() + bw.OrigWriter.f.Close() + bw.mu.Unlock() + return nil +} + +// Write implements io.Writer (and reopen.Writer). +func (bw *BufferedFileWriter) Write(p []byte) (int, error) { + bw.mu.Lock() + n, err := bw.BufWriter.Write(p) + + // Special Case... if the used space in the buffer is LESS than + // the input, then we did a flush in the middle of the line + // and the full log line was not sent on its way. + if bw.BufWriter.Buffered() < len(p) { + bw.BufWriter.Flush() + } + + bw.mu.Unlock() + return n, err +} + +// Flush flushes the buffer. +func (bw *BufferedFileWriter) Flush() { + bw.mu.Lock() + bw.BufWriter.Flush() + bw.OrigWriter.f.Sync() + bw.mu.Unlock() +} + +// flushDaemon periodically flushes the log file buffers. +func (bw *BufferedFileWriter) flushDaemon(interval time.Duration) { + for range time.NewTicker(interval).C { + bw.Flush() + } +} + +const bufferSize = 256 * 1024 +const flushInterval = 30 * time.Second + +// NewBufferedFileWriter opens a buffered file that is periodically flushed. +func NewBufferedFileWriter(w *FileWriter) *BufferedFileWriter { + return NewBufferedFileWriterSize(w, bufferSize, flushInterval) +} + +// NewBufferedFileWriterSize opens a buffered file with the given size that is periodically +// flushed on the given interval. 
+func NewBufferedFileWriterSize(w *FileWriter, size int, flush time.Duration) *BufferedFileWriter { + bw := BufferedFileWriter{ + OrigWriter: w, + BufWriter: bufio.NewWriterSize(w, size), + } + go bw.flushDaemon(flush) + return &bw +} diff --git a/vendor/github.com/pengsrc/go-shared/reopen/reopen_test.go b/vendor/github.com/pengsrc/go-shared/reopen/reopen_test.go new file mode 100644 index 000000000..0f015ac85 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/reopen/reopen_test.go @@ -0,0 +1,124 @@ +package reopen + +import ( + "io/ioutil" + "os" + "testing" +) + +// TestReopenAppend tests that we always append to an existing file +func TestReopenAppend(t *testing.T) { + filename := "/tmp/reopen_test_foo" + defer os.Remove(filename) + + // Create a sample file using normal means. + orig, err := os.Create(filename) + if err != nil { + t.Fatalf("Unable to create initial file %s: %s", filename, err) + } + _, err = orig.Write([]byte("line0\n")) + if err != nil { + t.Fatalf("Unable to write initial line %s: %s", filename, err) + } + err = orig.Close() + if err != nil { + t.Fatalf("Unable to close initial file: %s", err) + } + + // Test that making a new File appends. + f, err := NewFileWriter(filename) + if err != nil { + t.Fatalf("Unable to create %s", filename) + } + _, err = f.Write([]byte("line1\n")) + if err != nil { + t.Errorf("Got write error1: %s", err) + } + + // Test that reopen always appends. + err = f.Reopen() + if err != nil { + t.Errorf("Got reopen error %s: %s", filename, err) + } + _, err = f.Write([]byte("line2\n")) + if err != nil { + t.Errorf("Got write error2 on %s: %s", filename, err) + } + + // Close file. + err = f.Close() + if err != nil { + t.Errorf("Got closing error for %s: %s", filename, err) + } + + // Read file, make sure it contains line0, line1, line2. + out, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("Unable read in final file %s: %s", filename, err) + } + outStr := string(out) + if outStr != "line0\nline1\nline2\n" { + t.Errorf("Result was %s", outStr) + } +} + +// TestChangeINode tests that reopen works when inode is swapped out. +func TestChangeINODE(t *testing.T) { + filename := "/tmp/reopen_test_foo" + moveFilename := "/tmp/reopen_test_foo.orig" + defer os.Remove(filename) + defer os.Remove(moveFilename) + + // Step 1 -- Create a sample file using normal means. + orig, err := os.Create(filename) + if err != nil { + t.Fatalf("Unable to create initial file %s: %s", filename, err) + } + err = orig.Close() + if err != nil { + t.Fatalf("Unable to close initial file: %s", err) + } + + // Step 2 -- Test that making a new File appends. + f, err := NewFileWriter(filename) + if err != nil { + t.Fatalf("Unable to create %s", filename) + } + _, err = f.Write([]byte("line1\n")) + if err != nil { + t.Errorf("Got write error1: %s", err) + } + + // Step 3 -- Now move file. + err = os.Rename(filename, moveFilename) + if err != nil { + t.Errorf("Renaming error: %s", err) + } + f.Write([]byte("after1\n")) + + // Step Test that reopen always appends. + err = f.Reopen() + if err != nil { + t.Errorf("Got reopen error %s: %s", filename, err) + } + _, err = f.Write([]byte("line2\n")) + if err != nil { + t.Errorf("Got write error2 on %s: %s", filename, err) + } + + // Close file. + err = f.Close() + if err != nil { + t.Errorf("Got closing error for %s: %s", filename, err) + } + + // Read file, make sure it contains line0, line1, line2. 
+ out, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("Unable read in final file %s: %s", filename, err) + } + outStr := string(out) + if outStr != "line2\n" { + t.Errorf("Result was %s", outStr) + } +} diff --git a/vendor/github.com/pengsrc/go-shared/rest/rest.go b/vendor/github.com/pengsrc/go-shared/rest/rest.go new file mode 100644 index 000000000..b16fd8513 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/rest/rest.go @@ -0,0 +1,151 @@ +package rest + +import ( + "bytes" + "errors" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/Jeffail/gabs" +) + +// Method contains the supported HTTP verbs. +type Method string + +// Supported HTTP verbs. +const ( + Get Method = "GET" + Post Method = "POST" + Put Method = "PUT" + Patch Method = "PATCH" + Delete Method = "DELETE" +) + +// Request holds the request to an API Call. +type Request struct { + Method Method + BaseURL string // e.g. https://api.service.com + Headers map[string]string + QueryParams map[string]string + Body []byte +} + +// Response holds the response from an API call. +type Response struct { + StatusCode int // e.g. 200 + Headers http.Header // e.g. map[X-Rate-Limit:[600]] + Body string // e.g. {"result: success"} + JSON *gabs.Container +} + +// ParseJSON parses the response body to JSON container. +func (r *Response) ParseJSON() error { + if strings.Contains(r.Headers.Get("Content-Type"), "application/json") { + json, err := gabs.ParseJSON([]byte(r.Body)) + if err != nil { + return err + } + + r.JSON = json + return nil + } + + return errors.New("response body is not JSON") +} + +// DefaultClient is used if no custom HTTP client is defined +var DefaultClient = &Client{HTTPClient: http.DefaultClient} + +// Client allows modification of client headers, redirect policy +// and other settings +// See https://golang.org/pkg/net/http +type Client struct { + HTTPClient *http.Client +} + +// The following functions enable the ability to define a +// custom HTTP Client + +// MakeRequest makes the API call. +func (c *Client) MakeRequest(req *http.Request) (*http.Response, error) { + return c.HTTPClient.Do(req) +} + +// API is the main interface to the API. +func (c *Client) API(r *Request) (*Response, error) { + // Build the HTTP request object. + req, err := BuildRequestObject(r) + if err != nil { + return nil, err + } + + // Build the HTTP client and make the request. + res, err := c.MakeRequest(req) + if err != nil { + return nil, err + } + + // Build Response object. + response, err := BuildResponse(res) + if err != nil { + return nil, err + } + + return response, nil +} + +// AddQueryParameters adds query parameters to the URL. +func AddQueryParameters(baseURL string, queryParams map[string]string) string { + baseURL += "?" + params := url.Values{} + for key, value := range queryParams { + params.Add(key, value) + } + return baseURL + params.Encode() +} + +// BuildRequestObject creates the HTTP request object. +func BuildRequestObject(r *Request) (*http.Request, error) { + // Add any query parameters to the URL. + if len(r.QueryParams) != 0 { + r.BaseURL = AddQueryParameters(r.BaseURL, r.QueryParams) + } + req, err := http.NewRequest(string(r.Method), r.BaseURL, bytes.NewBuffer(r.Body)) + for key, value := range r.Headers { + req.Header.Set(key, value) + } + _, exists := req.Header["Content-Type"] + if len(r.Body) > 0 && !exists { + req.Header.Set("Content-Type", "application/json") + } + return req, err +} + +// BuildResponse builds the response struct. 
+func BuildResponse(r *http.Response) (*Response, error) { + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + defer r.Body.Close() + + response := Response{ + StatusCode: r.StatusCode, + Body: string(body), + Headers: r.Header, + } + + return &response, nil +} + +// MakeRequest makes the API call. +func MakeRequest(r *http.Request) (*http.Response, error) { + return DefaultClient.HTTPClient.Do(r) +} + +// API is the main interface to the API. +func API(request *Request) (*Response, error) { + return DefaultClient.API(request) +} diff --git a/vendor/github.com/pengsrc/go-shared/rest/rest_test.go b/vendor/github.com/pengsrc/go-shared/rest/rest_test.go new file mode 100644 index 000000000..968639eb7 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/rest/rest_test.go @@ -0,0 +1,138 @@ +package rest + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBuildURL(t *testing.T) { + testURL := AddQueryParameters( + "http://api.test.com", + map[string]string{ + "test": "1", + "test2": "2", + }, + ) + assert.Equal(t, "http://api.test.com?test=1&test2=2", testURL) +} + +func TestBuildRequest(t *testing.T) { + request := Request{ + Method: Get, + BaseURL: "http://api.test.com", + Headers: map[string]string{ + "Content-Type": "application/json", + "Authorization": "Bearer APK_KEY", + }, + QueryParams: map[string]string{ + "test": "1", + "test2": "2", + }, + } + req, err := BuildRequestObject(&request) + assert.NoError(t, err) + assert.NotNil(t, req) +} + +func TestBuildResponse(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/not+json") + fmt.Fprintln(w, "{\"message\": \"success\"}") + })) + defer fakeServer.Close() + + request := Request{ + Method: Get, + BaseURL: fakeServer.URL, + } + req, err := BuildRequestObject(&request) + assert.NoError(t, err) + + res, err := MakeRequest(req) + assert.NoError(t, err) + + response, err := BuildResponse(res) + assert.NoError(t, err) + err = response.ParseJSON() + assert.Error(t, err) + + assert.Equal(t, 200, response.StatusCode) + assert.NotEqual(t, 0, len(response.Body)) + assert.NotEqual(t, 0, len(response.Headers)) + assert.Nil(t, response.JSON) +} + +func TestRest(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + fmt.Fprintln(w, "{\"message\": \"success\"}") + })) + defer fakeServer.Close() + + request := Request{ + Method: Get, + BaseURL: fakeServer.URL + "/test_endpoint", + Headers: map[string]string{ + "Content-Type": "application/json", + "Authorization": "Bearer APK_KEY", + }, + QueryParams: map[string]string{ + "test": "1", + "test2": "2", + }, + } + response, err := API(&request) + assert.NoError(t, err) + err = response.ParseJSON() + assert.NoError(t, err) + + assert.Equal(t, 200, response.StatusCode) + assert.NotEqual(t, 0, len(response.Body)) + assert.NotEqual(t, 0, len(response.Headers)) + assert.Equal(t, "success", response.JSON.Path("message").Data().(string)) +} + +func TestDefaultContentType(t *testing.T) { + request := Request{ + Method: Get, + BaseURL: "http://localhost", + Body: []byte(`{"hello": "world"}`), + } + req, err := BuildRequestObject(&request) + assert.NoError(t, err) + assert.Equal(t, "application/json", req.Header.Get("Content-Type")) +} + +func TestCustomContentType(t 
*testing.T) { + request := Request{ + Method: Get, + BaseURL: "http://localhost", + Headers: map[string]string{"Content-Type": "custom"}, + Body: []byte("Hello World"), + } + res, err := BuildRequestObject(&request) + assert.NoError(t, err) + assert.Equal(t, "custom", res.Header.Get("Content-Type")) +} + +func TestCustomHTTPClient(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(time.Millisecond * 20) + fmt.Fprintln(w, "{\"message\": \"success\"}") + })) + defer fakeServer.Close() + + request := Request{ + Method: Get, + BaseURL: fakeServer.URL + "/test_endpoint", + } + customClient := &Client{&http.Client{Timeout: time.Millisecond * 10}} + _, err := customClient.API(&request) + assert.True(t, strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers")) +} diff --git a/vendor/github.com/pengsrc/go-shared/yaml/yaml.go b/vendor/github.com/pengsrc/go-shared/yaml/yaml.go new file mode 100644 index 000000000..afed9b7e8 --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/yaml/yaml.go @@ -0,0 +1,32 @@ +package yaml + +import ( + "gopkg.in/yaml.v2" +) + +// Encode encode given interface to yaml byte slice. +func Encode(source interface{}) ([]byte, error) { + bytesResult, err := yaml.Marshal(source) + if err != nil { + return []byte{}, err + } + + return bytesResult, nil +} + +// Decode decode given yaml byte slice to corresponding struct. +func Decode(content []byte, destinations ...interface{}) (interface{}, error) { + var destination interface{} + var err error + if len(destinations) == 1 { + destination = destinations[0] + err = yaml.Unmarshal(content, destination) + } else { + err = yaml.Unmarshal(content, &destination) + } + + if err != nil { + return nil, err + } + return destination, err +} diff --git a/vendor/github.com/pengsrc/go-shared/yaml/yaml_test.go b/vendor/github.com/pengsrc/go-shared/yaml/yaml_test.go new file mode 100644 index 000000000..2238530be --- /dev/null +++ b/vendor/github.com/pengsrc/go-shared/yaml/yaml_test.go @@ -0,0 +1,72 @@ +package yaml + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestYAMLDecodeUnknown(t *testing.T) { + yamlString := ` +key1: "This is a string." 
# Single Line Comment +key2: 10.50 +key3: + - null + - nestedKey1: Anothor string +` + + anyData, err := Decode([]byte(yamlString)) + assert.NoError(t, err) + data := anyData.(map[interface{}]interface{}) + assert.Equal(t, 10.50, data["key2"]) +} + +func TestYAMLDecodeKnown(t *testing.T) { + type SampleYAML struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + } + sampleYAMLString := `name: "NAME"` + + sample := SampleYAML{Name: "NaMe", Description: "DeScRiPtIoN"} + anyDataPointer, err := Decode([]byte(sampleYAMLString), &sample) + assert.NoError(t, err) + data := anyDataPointer.(*SampleYAML) + assert.Equal(t, "NAME", sample.Name) + assert.Equal(t, "DeScRiPtIoN", sample.Description) + assert.Equal(t, "NAME", (*data).Name) + assert.Equal(t, "DeScRiPtIoN", (*data).Description) + + _, err = Decode([]byte(`- - -`), &YAMLMustError{}) + assert.Error(t, err) +} + +func TestYAMLDecodeEmpty(t *testing.T) { + yamlString := "" + + anyData, err := Decode([]byte(yamlString)) + assert.NoError(t, err) + assert.Nil(t, anyData) +} + +func TestYAMLEncode(t *testing.T) { + type SampleYAML struct { + Name string `yaml:"name"` + Description string `yaml:"description"` + } + sample := SampleYAML{Name: "NaMe", Description: "DeScRiPtIoN"} + + yamlBytes, err := Encode(sample) + assert.NoError(t, err) + assert.Equal(t, "name: NaMe\ndescription: DeScRiPtIoN\n", string(yamlBytes)) + + _, err = Encode(&YAMLMustError{}) + assert.Error(t, err) +} + +type YAMLMustError struct{} + +func (*YAMLMustError) MarshalYAML() (interface{}, error) { + return nil, errors.New("marshal error") +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/.gitignore b/vendor/github.com/yunify/qingstor-sdk-go/.gitignore new file mode 100644 index 000000000..3921f048a --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +out +gen +vendor +coverage +release diff --git a/vendor/github.com/yunify/qingstor-sdk-go/.gitmodules b/vendor/github.com/yunify/qingstor-sdk-go/.gitmodules new file mode 100644 index 000000000..614f6b85d --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/.gitmodules @@ -0,0 +1,6 @@ +[submodule "specs/qingstor"] + path = specs/qingstor + url = https://github.com/yunify/qingstor-api-specs.git +[submodule "test/features"] + path = test/features + url = https://github.com/yunify/qingstor-sdk-test-scenarios.git diff --git a/vendor/github.com/yunify/qingstor-sdk-go/.travis.yml b/vendor/github.com/yunify/qingstor-sdk-go/.travis.yml new file mode 100644 index 000000000..151a8dea6 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/.travis.yml @@ -0,0 +1,51 @@ +sudo: required + +services: + - docker + +language: go +go: + - 1.7 + +env: + matrix: + - GO_VERSION=1.8 + - GO_VERSION=1.7 + - GO_VERSION=1.6 + - GO_VERSION=1.5 + +cache: + directories: + - ${HOME}/source + +before_install: + - pushd ${HOME}/source + - if [[ ! -d "./make-4.0" ]]; then + wget http://ftp.gnu.org/gnu/make/make-4.0.tar.gz && + tar -vxzf make-4.0.tar.gz && + pushd make-4.0 && ./configure && make && popd; + fi + - pushd make-4.0 && sudo make install && popd + - if [[ ! 
-d "./glide-v0.12.3" ]]; then + wget https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-linux-amd64.tar.gz && + tar -vxzf glide-v0.12.3-linux-amd64.tar.gz && + mv linux-amd64 glide-v0.12.3; + fi + - pushd glide-v0.12.3 && sudo cp glide /usr/local/bin && popd + - popd + - /usr/local/bin/make --version + - /usr/local/bin/glide --version + +install: + - go get -u github.com/yunify/snips + - go get -u github.com/golang/lint/golint; + - glide install + +before_script: + - /usr/local/bin/make update + - /usr/local/bin/make generate + +script: + - /usr/local/bin/make check + - /usr/local/bin/make release + - /usr/local/bin/make test-runtime-go-${GO_VERSION} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/AUTHORS b/vendor/github.com/yunify/qingstor-sdk-go/AUTHORS new file mode 100644 index 000000000..fa4aa558a --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/AUTHORS @@ -0,0 +1,4 @@ +Patches have been contributed by (ordered by the time of the first merged patch): + +Jingwen Peng +Osier Yang diff --git a/vendor/github.com/yunify/qingstor-sdk-go/CHANGELOG.md b/vendor/github.com/yunify/qingstor-sdk-go/CHANGELOG.md new file mode 100644 index 000000000..d327e7bbe --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/CHANGELOG.md @@ -0,0 +1,107 @@ +# Change Log +All notable changes to QingStor SDK for Go will be documented in this file. + +## [v2.2.5] - 2017-05-22 + +### Fixed + +- Fix error in request URL query. +- Fix error in request header value. + +## [v2.2.4] - 2017-03-28 + +### Fixed + +- Fix type of Content-Type header. +- Add Content-Length to GetObjectOutput. +- Fix status code of DELETE CORS API. +- Fix type of object size for GET Bucket API. + +### BREAKING CHANGES + +- The type of content length and object size has been changed from `*int` to `*int64`. + +## [v2.2.3] - 2017-03-10 + +### Added + +- Allow user to append additional info to User-Agent + +## [v2.2.2] - 2017-03-08 + +### Fixed + +- Resource is not mandatory in bucket policy statement + +## [v2.2.1] - 2017-03-05 + +### Changed + +- Add "Encrypted" field to "KeyType" struct + +## [v2.2.0] - 2017-02-28 + +### Added + +- Add ListMultipartUploads API. + +### Fixed + +- Fix request builder & signer. + +## [v2.1.2] - 2017-01-16 + +### Fixed + +- Fix request signer. + +## [v2.1.1] - 2017-01-05 + +### Changed + +- Fix logger output format, don't parse special characters. +- Rename package "errs" to "errors". + +### Added + +- Add type converters. + +### BREAKING CHANGES + +- Change value type in input and output to pointer. + +## [v2.1.0] - 2016-12-23 + +### Changed + +- Fix signer bug. +- Add more parameters to sign. + +### Added + +- Add request parameters for GET Object. +- Add IP address conditions for bucket policy. + +## [v2.0.1] - 2016-12-15 + +### Changed + +- Improve the implementation of deleting multiple objects. + +## [v2.0.0] - 2016-12-14 + +### Added + +- QingStor SDK for the Go programming language. 
+ +[v2.2.5]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.4...v2.2.5 +[v2.2.4]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.3...v2.2.4 +[v2.2.3]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.2...v2.2.3 +[v2.2.2]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.1...v2.2.2 +[v2.2.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.0...v2.2.1 +[v2.2.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.2...v2.2.0 +[v2.1.2]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.1...v2.1.2 +[v2.1.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.0...v2.1.1 +[v2.1.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.1...v2.1.0 +[v2.0.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.0...v2.0.1 +[v2.0.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.0...v2.0.0 diff --git a/vendor/github.com/yunify/qingstor-sdk-go/LICENSE b/vendor/github.com/yunify/qingstor-sdk-go/LICENSE new file mode 100644 index 000000000..c2e1ccf80 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2012 Yunify Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/yunify/qingstor-sdk-go/MAINTAINERS b/vendor/github.com/yunify/qingstor-sdk-go/MAINTAINERS new file mode 100644 index 000000000..a9e08e815 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/MAINTAINERS @@ -0,0 +1 @@ +Jingwen Peng diff --git a/vendor/github.com/yunify/qingstor-sdk-go/Makefile b/vendor/github.com/yunify/qingstor-sdk-go/Makefile new file mode 100644 index 000000000..4e5a97c71 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/Makefile @@ -0,0 +1,207 @@ +SHELL := /bin/bash + +PREFIX=qingstor-sdk-go +VERSION=$(shell cat version.go | grep "Version\ =" | sed -e s/^.*\ //g | sed -e s/\"//g) +DIRS_TO_CHECK=$(shell ls -d */ | grep -vE "vendor|test") +PKGS_TO_CHECK=$(shell go list ./... | grep -v "/vendor/") +PKGS_TO_RELEASE=$(shell go list ./... | grep -vE "/vendor/|/test") +FILES_TO_RELEASE=$(shell find . -name "*.go" | grep -vE "/vendor/|/test|.*_test.go") +FILES_TO_RELEASE_WITH_VENDOR=$(shell find . 
-name "*.go" | grep -vE "/test|.*_test.go") + +.PHONY: help +help: + @echo "Please use \`make \` where is one of" + @echo " all to check, build, test and release this SDK" + @echo " check to vet and lint the SDK" + @echo " update to update git submodules" + @echo " generate to generate service code" + @echo " build to build the SDK" + @echo " test to run test" + @echo " test-coverage to run test with coverage" + @echo " test-race to run test with race" + @echo " test-runtime to run test in Go 1.8/1.7/1.6/1.5 in docker" + @echo " integration-test to run integration test" + @echo " release to build and release current version" + @echo " release-source to pack the source code" + @echo " clean to clean the coverage files" + +.PHONY: all +all: check build unit release + +.PHONY: check +check: vet lint + +.PHONY: vet +vet: + @echo "go tool vet, skipping vendor packages" + @go tool vet -all ${DIRS_TO_CHECK} + @echo "ok" + +.PHONY: lint +lint: + @echo "golint, skipping vendor packages" + @lint=$$(for pkg in ${PKGS_TO_CHECK}; do golint $${pkg}; done); \ + lint=$$(echo "$${lint}"); \ + if [[ -n $${lint} ]]; then echo "$${lint}"; exit 1; fi + @echo "ok" + +.PHONY: update +update: + git submodule update --remote + @echo "ok" + +.PHONY: generate +generate: + @if [[ ! -f "$$(which snips)" ]]; then \ + echo "ERROR: Command \"snips\" not found."; \ + fi + snips \ + --service=qingstor --service-api-version=latest \ + --spec="./specs" --template="./template" --output="./service" + gofmt -w . + @echo "ok" + +.PHONY: build +build: + @echo "build the SDK" + GOOS=linux GOARCH=amd64 go build ${PKGS_TO_RELEASE} + GOOS=darwin GOARCH=amd64 go build ${PKGS_TO_RELEASE} + GOOS=windows GOARCH=amd64 go build ${PKGS_TO_RELEASE} + @echo "ok" + +.PHONY: test +test: + @echo "run test" + go test -v ${PKGS_TO_RELEASE} + @echo "ok" + +.PHONY: test-coverage +test-coverage: + @echo "run test with coverage" + for pkg in ${PKGS_TO_RELEASE}; do \ + output="coverage$${pkg#github.com/yunify/qingstor-sdk-go}"; \ + mkdir -p $${output}; \ + go test -v -cover -coverprofile="$${output}/profile.out" $${pkg}; \ + if [[ -e "$${output}/profile.out" ]]; then \ + go tool cover -html="$${output}/profile.out" -o "$${output}/profile.html"; \ + fi; \ + done + @echo "ok" + +.PHONY: test-race +test-race: + @echo "run test with race" + go test -v -race -cpu=1,2,4 ${PKGS_TO_RELEASE} + @echo "ok" + +.PHONY: test-runtime +test-runtime: test-runtime-go-1.8 test-runtime-go-1.7 test-runtime-go-1.6 test-runtime-go-1.5 + +export define DOCKERFILE_GO_1_8 +FROM golang:1.8 + +ADD . /go/src/github.com/yunify/qingstor-sdk-go +WORKDIR /go/src/github.com/yunify/qingstor-sdk-go + +CMD ["make", "build", "test", "test-coverage"] +endef + +.PHONY: test-runtime-go-1.8 +test-runtime-go-1.8: + @echo "run test in go 1.8" + echo "$${DOCKERFILE_GO_1_8}" > "dockerfile_go_1.8" + docker build -f "./dockerfile_go_1.8" -t "${PREFIX}:go-1.8" . + rm -f "./dockerfile_go_1.8" + docker run --name "${PREFIX}-go-1.8-unit" -t "${PREFIX}:go-1.8" + docker rm "${PREFIX}-go-1.8-unit" + docker rmi "${PREFIX}:go-1.8" + @echo "ok" + +export define DOCKERFILE_GO_1_7 +FROM golang:1.7 + +ADD . /go/src/github.com/yunify/qingstor-sdk-go +WORKDIR /go/src/github.com/yunify/qingstor-sdk-go + +CMD ["make", "build", "test", "test-coverage"] +endef + +.PHONY: test-runtime-go-1.7 +test-runtime-go-1.7: + @echo "run test in go 1.7" + echo "$${DOCKERFILE_GO_1_7}" > "dockerfile_go_1.7" + docker build -f "./dockerfile_go_1.7" -t "${PREFIX}:go-1.7" . 
+ rm -f "./dockerfile_go_1.7" + docker run --name "${PREFIX}-go-1.7-unit" -t "${PREFIX}:go-1.7" + docker rm "${PREFIX}-go-1.7-unit" + docker rmi "${PREFIX}:go-1.7" + @echo "ok" + +export define DOCKERFILE_GO_1_6 +FROM golang:1.6 + +ADD . /go/src/github.com/yunify/qingstor-sdk-go +WORKDIR /go/src/github.com/yunify/qingstor-sdk-go + +CMD ["make", "build", "test", "test-coverage"] +endef + +.PHONY: test-runtime-go-1.6 +test-runtime-go-1.6: + @echo "run test in go 1.6" + echo "$${DOCKERFILE_GO_1_6}" > "dockerfile_go_1.6" + docker build -f "./dockerfile_go_1.6" -t "${PREFIX}:go-1.6" . + rm -f "./dockerfile_go_1.6" + docker run --name "${PREFIX}-go-1.6-unit" -t "${PREFIX}:go-1.6" + docker rm "${PREFIX}-go-1.6-unit" + docker rmi "${PREFIX}:go-1.6" + @echo "ok" + +export define DOCKERFILE_GO_1_5 +FROM golang:1.5 +ENV GO15VENDOREXPERIMENT="1" + +ADD . /go/src/github.com/yunify/qingstor-sdk-go +WORKDIR /go/src/github.com/yunify/qingstor-sdk-go + +CMD ["make", "build", "test", "test-coverage"] +endef + +.PHONY: test-runtime-go-1.5 +test-runtime-go-1.5: + @echo "run test in go 1.5" + echo "$${DOCKERFILE_GO_1_5}" > "dockerfile_go_1.5" + docker build -f "dockerfile_go_1.5" -t "${PREFIX}:go-1.5" . + rm -f "dockerfile_go_1.5" + docker run --name "${PREFIX}-go-1.5-unit" -t "${PREFIX}:go-1.5" + docker rm "${PREFIX}-go-1.5-unit" + docker rmi "${PREFIX}:go-1.5" + @echo "ok" + +.PHONY: integration-test +integration-test: + @echo "run integration test" + pushd "./test"; go run *.go; popd + @echo "ok" + +.PHONY: release +release: release-source release-source-with-vendor + +.PHONY: release-source +release-source: + @echo "pack the source code" + mkdir -p "release" + zip -FS "release/${PREFIX}-source-v${VERSION}.zip" ${FILES_TO_RELEASE} + @echo "ok" + +.PHONY: release-source-with-vendor +release-source-with-vendor: + @echo "pack the source code" + mkdir -p "release" + zip -FS "release/${PREFIX}-source-with-vendor-v${VERSION}.zip" ${FILES_TO_RELEASE_WITH_VENDOR} + @echo "ok" + +.PHONY: clean +clean: + rm -rf $${PWD}/coverage + @echo "ok" diff --git a/vendor/github.com/yunify/qingstor-sdk-go/README.md b/vendor/github.com/yunify/qingstor-sdk-go/README.md new file mode 100644 index 000000000..497ef9ba3 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/README.md @@ -0,0 +1,84 @@ +# QingStor SDK for Go + +[![Build Status](https://travis-ci.org/yunify/qingstor-sdk-go.svg?branch=master)](https://travis-ci.org/yunify/qingstor-sdk-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/yunify/qingstor-sdk-go)](https://goreportcard.com/report/github.com/yunify/qingstor-sdk-go) +[![API Reference](http://img.shields.io/badge/api-reference-green.svg)](http://docs.qingcloud.com/qingstor/) +[![License](http://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/yunify/qingstor-sdk-go/blob/master/LICENSE) + +The official QingStor SDK for the Go programming language. + +## Getting Started + +### Installation + +Refer to the [Installation Guide](docs/installation.md), and have this SDK installed. + +### Preparation + +Before your start, please go to [QingCloud Console](https://console.qingcloud.com/access_keys/) to create a pair of QingCloud API AccessKey. + +___API AccessKey Example:___ + +``` yaml +access_key_id: 'ACCESS_KEY_ID_EXAMPLE' +secret_access_key: 'SECRET_ACCESS_KEY_EXAMPLE' +``` + +### Usage + +Now you are ready to code. You can read the detailed guides in the list below to have a clear understanding or just take the quick start code example. 
+ +Checkout our [releases](https://github.com/yunify/qingstor-sdk-go/releases) and [change log](https://github.com/yunify/qingstor-sdk-go/blob/master/CHANGELOG.md) for information about the latest features, bug fixes and new ideas. + +- [Configuration Guide](docs/configuration.md) +- [QingStor Service Usage Guide](docs/qingstor_service_usage.md) + +___Quick Start Code Example:___ + +``` go +package main + +import ( + "fmt" + + "github.com/yunify/qingstor-sdk-go/config" + qs "github.com/yunify/qingstor-sdk-go/service" +) + +func main() { + conf, _ := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY") + + // Initialize service object for QingStor. + qsService, _ := qs.Init(conf) + + // List all buckets. + qsOutput, _ := qsService.ListBuckets(&qs.ListBucketsInput{}) + + // Print HTTP status code. + fmt.Println(qs.IntValue(qsOutput.StatusCode)) + + // Print the count of buckets. + fmt.Println(qs.IntValue(qsOutput.Count)) + + // Print the first bucket name. + fmt.Println(qs.StringValue(qsOutput.Buckets[0].Name)) +} +``` + +## Reference Documentations + +- [QingStor Documentation](https://docs.qingcloud.com/qingstor/index.html) +- [QingStor Guide](https://docs.qingcloud.com/qingstor/guide/index.html) +- [QingStor APIs](https://docs.qingcloud.com/qingstor/api/index.html) + +## Contributing + +1. Fork it ( https://github.com/yunify/qingstor-sdk-go/fork ) +2. Create your feature branch (`git checkout -b new-feature`) +3. Commit your changes (`git commit -asm 'Add some feature'`) +4. Push to the branch (`git push origin new-feature`) +5. Create a new Pull Request + +## LICENSE + +The Apache License (Version 2.0, January 2004). diff --git a/vendor/github.com/yunify/qingstor-sdk-go/config/config.go b/vendor/github.com/yunify/qingstor-sdk-go/config/config.go new file mode 100644 index 000000000..fabb5efd8 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/config/config.go @@ -0,0 +1,172 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package config + +import ( + "errors" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/pengsrc/go-shared/yaml" + + "github.com/yunify/qingstor-sdk-go/logger" +) + +// A Config stores a configuration of this sdk. +type Config struct { + AccessKeyID string `yaml:"access_key_id"` + SecretAccessKey string `yaml:"secret_access_key"` + + Host string `yaml:"host"` + Port int `yaml:"port"` + Protocol string `yaml:"protocol"` + ConnectionRetries int `yaml:"connection_retries"` + + AdditionalUserAgent string `yaml:"additional_user_agent"` + + LogLevel string `yaml:"log_level"` + + Connection *http.Client +} + +// New create a Config with given AccessKeyID and SecretAccessKey. 
+func New(accessKeyID, secretAccessKey string) (*Config, error) { + config, err := NewDefault() + if err != nil { + return nil, err + } + + config.AccessKeyID = accessKeyID + config.SecretAccessKey = secretAccessKey + + config.Connection = &http.Client{} + + return config, nil +} + +// NewDefault create a Config with default configuration. +func NewDefault() (*Config, error) { + config := &Config{} + err := config.LoadDefaultConfig() + if err != nil { + return nil, err + } + config.Connection = &http.Client{} + + return config, nil +} + +// Check checks the configuration. +func (c *Config) Check() error { + if c.AccessKeyID == "" { + return errors.New("access key ID not specified") + } + if c.SecretAccessKey == "" { + return errors.New("secret access key not specified") + } + + if c.Host == "" { + return errors.New("server host not specified") + } + if c.Port <= 0 { + return errors.New("server port not specified") + } + if c.Protocol == "" { + return errors.New("server protocol not specified") + } + + if c.AdditionalUserAgent != "" { + for _, x := range c.AdditionalUserAgent { + // Allow space(32) to ~(126) in ASCII Table, exclude "(34). + if int(x) < 32 || int(x) > 126 || int(x) == 32 || int(x) == 34 { + return errors.New("additional User-Agent contains characters that not allowed") + } + } + } + + err := logger.CheckLevel(c.LogLevel) + if err != nil { + return err + } + + return nil +} + +// LoadDefaultConfig loads the default configuration for Config. +// It returns error if yaml decode failed. +func (c *Config) LoadDefaultConfig() error { + _, err := yaml.Decode([]byte(DefaultConfigFileContent), c) + if err != nil { + logger.Error("Config parse error: " + err.Error()) + return err + } + + logger.SetLevel(c.LogLevel) + + return nil +} + +// LoadUserConfig loads user configuration in ~/.qingstor/config.yaml for Config. +// It returns error if file not found. +func (c *Config) LoadUserConfig() error { + _, err := os.Stat(GetUserConfigFilePath()) + if err != nil { + logger.Warn("Installing default config file to \"" + GetUserConfigFilePath() + "\"") + InstallDefaultUserConfig() + } + + return c.LoadConfigFromFilePath(GetUserConfigFilePath()) +} + +// LoadConfigFromFilePath loads configuration from a specified local path. +// It returns error if file not found or yaml decode failed. +func (c *Config) LoadConfigFromFilePath(filepath string) error { + if strings.Index(filepath, "~/") == 0 { + filepath = strings.Replace(filepath, "~/", getHome()+"/", 1) + } + + yamlString, err := ioutil.ReadFile(filepath) + if err != nil { + logger.Error("File not found: " + filepath) + return err + } + + return c.LoadConfigFromContent(yamlString) +} + +// LoadConfigFromContent loads configuration from a given byte slice. +// It returns error if yaml decode failed. +func (c *Config) LoadConfigFromContent(content []byte) error { + c.LoadDefaultConfig() + + _, err := yaml.Decode(content, c) + if err != nil { + logger.Error("Config parse error: " + err.Error()) + return err + } + + err = c.Check() + if err != nil { + return err + } + + logger.SetLevel(c.LogLevel) + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/config/config_test.go b/vendor/github.com/yunify/qingstor-sdk-go/config/config_test.go new file mode 100644 index 000000000..fa295d053 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/config/config_test.go @@ -0,0 +1,108 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. 
+// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/yunify/qingstor-sdk-go/logger" +) + +func TestConfig(t *testing.T) { + c := Config{ + AccessKeyID: "AccessKeyID", + SecretAccessKey: "SecretAccessKey", + Host: "qingstor.dev", + Port: 443, + Protocol: "https", + ConnectionRetries: 10, + LogLevel: "warn", + } + + assert.Equal(t, "AccessKeyID", c.AccessKeyID) + assert.Equal(t, "SecretAccessKey", c.SecretAccessKey) + assert.Equal(t, "qingstor.dev", c.Host) + assert.Equal(t, 10, c.ConnectionRetries) + assert.Equal(t, "warn", c.LogLevel) + + c.AdditionalUserAgent = `"` + assert.Error(t, c.Check()) + + c.AdditionalUserAgent = `test/user` + assert.NoError(t, c.Check()) +} + +func TestLoadDefaultConfig(t *testing.T) { + config := Config{} + config.LoadDefaultConfig() + + assert.Equal(t, "", config.AccessKeyID) + assert.Equal(t, "", config.SecretAccessKey) + assert.Equal(t, "https", config.Protocol) + assert.Equal(t, "qingstor.com", config.Host) + assert.Equal(t, "", config.AdditionalUserAgent) + assert.Equal(t, "warning", logger.GetLevel()) +} + +func TestLoadUserConfig(t *testing.T) { + config := Config{} + config.LoadUserConfig() + + assert.NotNil(t, config.Host) + assert.NotNil(t, config.Protocol) +} + +func TestLoadConfigFromContent(t *testing.T) { + fileContent := ` +access_key_id: 'access_key_id' +secret_access_key: 'secret_access_key' + +log_level: 'debug' + +` + + config := Config{} + config.LoadConfigFromContent([]byte(fileContent)) + + assert.Equal(t, "access_key_id", config.AccessKeyID) + assert.Equal(t, "secret_access_key", config.SecretAccessKey) + assert.Equal(t, "https", config.Protocol) + assert.Equal(t, "qingstor.com", config.Host) + assert.Equal(t, "debug", logger.GetLevel()) +} + +func TestNewDefault(t *testing.T) { + config, err := NewDefault() + assert.Nil(t, err) + + assert.Equal(t, "", config.AccessKeyID) + assert.Equal(t, "", config.SecretAccessKey) + assert.Equal(t, "https", config.Protocol) + assert.Equal(t, "qingstor.com", config.Host) + assert.Equal(t, 3, config.ConnectionRetries) +} + +func TestNew(t *testing.T) { + config, err := New("AccessKeyID", "SecretAccessKey") + assert.Nil(t, err) + + assert.Equal(t, "AccessKeyID", config.AccessKeyID) + assert.Equal(t, "SecretAccessKey", config.SecretAccessKey) + assert.Equal(t, "https", config.Protocol) + assert.Equal(t, "qingstor.com", config.Host) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/config/contract.go b/vendor/github.com/yunify/qingstor-sdk-go/config/contract.go new file mode 100644 index 000000000..7b76691bc --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/config/contract.go @@ -0,0 +1,74 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 
Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package config + +import ( + "io/ioutil" + "os" + "path" + "runtime" + "strings" +) + +// DefaultConfigFileContent is the content of default config file. +const DefaultConfigFileContent = `# QingStor services configuration + +#access_key_id: ACCESS_KEY_ID +#secret_access_key: SECRET_ACCESS_KEY + +host: qingstor.com +port: 443 +protocol: https +connection_retries: 3 + +# Additional User-Agent +additional_user_agent: "" + +# Valid log levels are "debug", "info", "warn", "error", and "fatal". +log_level: warn + +` + +// DefaultConfigFile is the filename of default config file. +const DefaultConfigFile = "~/.qingstor/config.yaml" + +// GetUserConfigFilePath returns the user config file path. +func GetUserConfigFilePath() string { + return strings.Replace(DefaultConfigFile, "~/", getHome()+"/", 1) +} + +// InstallDefaultUserConfig will install default config file. +func InstallDefaultUserConfig() error { + err := os.MkdirAll(path.Dir(GetUserConfigFilePath()), 0755) + if err != nil { + return err + } + + return ioutil.WriteFile(GetUserConfigFilePath(), []byte(DefaultConfigFileContent), 0644) +} + +func getHome() string { + home := os.Getenv("HOME") + if runtime.GOOS == "windows" { + home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") + if home == "" { + home = os.Getenv("USERPROFILE") + } + } + + return home +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/docs/configuration.md b/vendor/github.com/yunify/qingstor-sdk-go/docs/configuration.md new file mode 100644 index 000000000..f88936ff9 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/docs/configuration.md @@ -0,0 +1,69 @@ +# Configuration Guide + +## Summary + +This SDK uses a structure called "Config" to store and manage configuration, read comments of public functions in ["config/config.go"](https://github.com/yunify/qingstor-sdk-go/blob/master/config/config.go) for details. + +Except for Access Key, you can also configure the API endpoint for private cloud usage scenario. All available configurable items are listed in the default configuration file. + +___Default Configuration File:___ + +``` yaml +# QingStor services configuration + +access_key_id: 'ACCESS_KEY_ID' +secret_access_key: 'SECRET_ACCESS_KEY' + +host: 'qingstor.com' +port: 443 +protocol: 'https' +connection_retries: 3 + +# Valid log levels are "debug", "info", "warn", "error", and "fatal". +log_level: 'warn' + +``` + +## Usage + +Just create a config structure instance with your API Access Key, and initialize services you need with Init() function of the target service. 
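+
+Putting the two together, a minimal sketch (illustrative only; `qs` is the service package `github.com/yunify/qingstor-sdk-go/service` as in the README, and the errors returned by `config.New` and `qs.Init` are dropped for brevity):
+
+``` go
+configuration, _ := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
+
+// Check validates the access keys, the endpoint settings (host, port,
+// protocol) and the log level before the configuration is used.
+if err := configuration.Check(); err != nil {
+    panic(err)
+}
+
+qsService, _ := qs.Init(configuration)
+```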
+
+### Code Snippet
+
+Create default configuration
+
+``` go
+defaultConfig, _ := config.NewDefault()
+```
+
+Create configuration from Access Key
+
+``` go
+configuration, _ := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
+
+anotherConfiguration, _ := config.NewDefault()
+anotherConfiguration.AccessKeyID = "ACCESS_KEY_ID"
+anotherConfiguration.SecretAccessKey = "SECRET_ACCESS_KEY"
+```
+
+Load user configuration
+
+``` go
+userConfig, _ := config.NewDefault()
+userConfig.LoadUserConfig()
+```
+
+Load configuration from config file
+
+``` go
+configFromFile, _ := config.NewDefault()
+configFromFile.LoadConfigFromFilePath("PATH/TO/FILE")
+```
+
+Change API endpoint
+
+``` go
+moreConfiguration, _ := config.NewDefault()
+
+moreConfiguration.Protocol = "http"
+moreConfiguration.Host = "api.private.com"
+moreConfiguration.Port = 80
+```
diff --git a/vendor/github.com/yunify/qingstor-sdk-go/docs/installation.md b/vendor/github.com/yunify/qingstor-sdk-go/docs/installation.md
new file mode 100644
index 000000000..fd9b842b6
--- /dev/null
+++ b/vendor/github.com/yunify/qingstor-sdk-go/docs/installation.md
@@ -0,0 +1,39 @@
+# Installation Guide
+
+## Requirement
+
+This SDK requires the vendor feature of Go 1.6 or higher; the dependencies this project uses are included in the `vendor` directory. We use [glide](https://glide.sh) to manage project dependencies.
+
+___Notice:___ _You can also use Go 1.5 with `GO15VENDOREXPERIMENT=1` set._
+
+## Install from source code
+
+Use `go get` to download this SDK from GitHub:
+
+``` bash
+$ go get -u github.com/yunify/qingstor-sdk-go
+```
+
+You can also download a specified version of zipped source code in the repository [releases page](https://github.com/yunify/qingstor-sdk-go/releases). The zipped source code only contains Go source code, without unit test files.
+
+___Examples:___
+
+- *[qingstor-sdk-go-source-v0.7.1.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-source-v0.7.1.zip)*
+- *[qingstor-sdk-go-source-with-vendor-v0.7.1.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-source-with-vendor-v0.7.1.zip)*
+
+## Install from binary release (deprecated)
+
+Go 1.7 added a feature called Binary-Only Package. It allows distributing packages in binary form without including the source code used for compiling the package. For more information about Binary-Only Package, please read [_GoLang Package Build_](https://golang.org/pkg/go/build/) to learn how to use it.
+
+We provide Linux, macOS and Windows binary packages along with header files. A header file contains only three lines: "//go:binary-only-package" is the first line, the second line is blank, and the third is the package name. There's one header file named "binary.go" for each Go package.
+
+You can download a specified version of zipped binary release in the repository [releases page](https://github.com/yunify/qingstor-sdk-go/releases).
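+
+For reference, a header file as described above is just three lines; a sketch of such a `binary.go` (using `sdk`, the name of this repository's top-level package) would be:
+
+``` go
+//go:binary-only-package
+
+package sdk
+```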
+
+___Notice:___ _We do not provide 386 binary packages, since almost no one uses a 386 machine._
+
+___Examples:___
+
+- *[qingstor-sdk-go-header-v0.7.1-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-header-v0.7.1-go-1.7.zip)*
+- *[qingstor-sdk-go-binary-v0.7.1-linux_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-linux_amd64-go-1.7.zip)*
+- *[qingstor-sdk-go-binary-v0.7.1-darwin_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-darwin_amd64-go-1.7.zip)*
+- *[qingstor-sdk-go-binary-v0.7.1-windows_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-windows_amd64-go-1.7.zip)*
diff --git a/vendor/github.com/yunify/qingstor-sdk-go/docs/qingstor_service_usage.md b/vendor/github.com/yunify/qingstor-sdk-go/docs/qingstor_service_usage.md
new file mode 100644
index 000000000..e4b180add
--- /dev/null
+++ b/vendor/github.com/yunify/qingstor-sdk-go/docs/qingstor_service_usage.md
@@ -0,0 +1,220 @@
+# QingStor Service Usage Guide
+
+Import the QingStor service package and initialize it with a config, and you are ready to use the initialized service. The service itself contains only one API, "ListBuckets".
+To use bucket-related APIs, you need to initialize a bucket from the service using the "Bucket" function.
+
+Each API function takes an Input struct and returns an Output struct. The Input struct consists of request params, request headers, request elements and request body, and the Output holds the HTTP status code, QingStor request ID, response headers, response elements, response body and an error (if an error occurred).
+
+You can use a specific version of a service by importing a service package with a date suffix.
+
+``` go
+import (
+    // Import the latest version API
+    qs "github.com/yunify/qingstor-sdk-go/service"
+)
+```
+
+### Code Snippet
+
+Initialize the QingStor service with a configuration
+
+``` go
+qsService, _ := qs.Init(configuration)
+```
+
+List buckets
+
+``` go
+qsOutput, _ := qsService.ListBuckets(nil)
+
+// Print the HTTP status code.
+// Example: 200
+fmt.Println(qs.IntValue(qsOutput.StatusCode))
+
+// Print the bucket count.
+// Example: 5
+fmt.Println(qs.IntValue(qsOutput.Count))
+
+// Print the name of the first bucket.
+// Example: "test-bucket"
+fmt.Println(qs.StringValue(qsOutput.Buckets[0].Name))
+```
+
+Initialize a QingStor bucket
+
+``` go
+bucket, _ := qsService.Bucket("test-bucket", "pek3a")
+```
+
+List objects in the bucket
+
+``` go
+bOutput, _ := bucket.ListObjects(nil)
+
+// Print the HTTP status code.
+// Example: 200
+fmt.Println(qs.IntValue(bOutput.StatusCode))
+
+// Print the key count.
+// Example: 7
+fmt.Println(len(bOutput.Keys))
+```
+
+Set ACL of the bucket
+
+``` go
+bACLOutput, _ := bucket.PutACL(&qs.PutBucketACLInput{
+    ACL: []*qs.ACLType{{
+        Grantee: &qs.GranteeType{
+            Type: qs.String("user"),
+            ID:   qs.String("usr-xxxxxxxx"),
+        },
+        Permission: qs.String("FULL_CONTROL"),
+    }},
+})
+
+// Print the HTTP status code.
+// Example: 200
+fmt.Println(qs.IntValue(bACLOutput.StatusCode))
+```
+
+Put object
+
+``` go
+// Open file
+file, _ := os.Open("/tmp/Screenshot.jpg")
+defer file.Close()
+
+// Calculate MD5
+hash := md5.New()
+io.Copy(hash, file)
+hashInBytes := hash.Sum(nil)[:16]
+md5String := hex.EncodeToString(hashInBytes)
+
+// Rewind the file before uploading; calculating the MD5 moved the read offset to the end.
+file.Seek(0, 0)
+
+// Put object
+oOutput, _ := bucket.PutObject(
+    "Screenshot.jpg",
+    &qs.PutObjectInput{
+        ContentLength: qs.Int(102475),          // Obtain automatically if empty
+        ContentType:   qs.String("image/jpeg"), // Detect automatically if empty
+        ContentMD5:    qs.String(md5String),
+        Body:          file,
+    },
+)
+
+// Print the HTTP status code.
+// Example: 201
+fmt.Println(qs.IntValue(oOutput.StatusCode))
+```
+
+Delete object
+
+``` go
+oOutput, _ := bucket.DeleteObject("Screenshot.jpg")
+
+// Print the HTTP status code.
+// Example: 204
+fmt.Println(qs.IntValue(oOutput.StatusCode))
+```
+
+Initialize Multipart Upload
+
+``` go
+aOutput, _ := bucket.InitiateMultipartUpload(
+    "QingCloudInsight.mov",
+    &qs.InitiateMultipartUploadInput{
+        ContentType: qs.String("video/quicktime"),
+    },
+)
+
+// Print the HTTP status code.
+// Example: 200
+fmt.Println(qs.IntValue(aOutput.StatusCode))
+
+// Print the upload ID.
+// Example: "9d37dd6ccee643075ca4e597ad65655c"
+fmt.Println(qs.StringValue(aOutput.UploadID))
+```
+
+Upload Multipart
+
+``` go
+aOutput, _ := bucket.UploadMultipart(
+    "QingCloudInsight.mov",
+    &qs.UploadMultipartInput{
+        UploadID:   qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
+        PartNumber: qs.Int(0),
+        ContentMD5: qs.String(md5String0),
+        Body:       file0,
+    },
+)
+
+// Print the HTTP status code.
+// Example: 201
+fmt.Println(qs.IntValue(aOutput.StatusCode))
+
+aOutput, _ = bucket.UploadMultipart(
+    "QingCloudInsight.mov",
+    &qs.UploadMultipartInput{
+        UploadID:   qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
+        PartNumber: qs.Int(1),
+        ContentMD5: qs.String(md5String1),
+        Body:       file1,
+    },
+)
+
+// Print the HTTP status code.
+// Example: 201
+fmt.Println(qs.IntValue(aOutput.StatusCode))
+
+aOutput, _ = bucket.UploadMultipart(
+    "QingCloudInsight.mov",
+    &qs.UploadMultipartInput{
+        UploadID:   qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
+        PartNumber: qs.Int(2),
+        ContentMD5: qs.String(md5String2),
+        Body:       file2,
+    },
+)
+
+// Print the HTTP status code.
+// Example: 201
+fmt.Println(qs.IntValue(aOutput.StatusCode))
+```
+
+Complete Multipart Upload
+
+``` go
+aOutput, _ := bucket.CompleteMultipartUpload(
+    "QingCloudInsight.mov",
+    &qs.CompleteMultipartUploadInput{
+        UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
+        ObjectParts: []*qs.ObjectPart{{
+            PartNumber: qs.Int(0),
+        }, {
+            PartNumber: qs.Int(1),
+        }, {
+            PartNumber: qs.Int(2),
+        }},
+    },
+)
+
+// Print the HTTP status code.
+// Example: 200
+fmt.Println(qs.IntValue(aOutput.StatusCode))
+```
+
+Abort Multipart Upload
+
+``` go
+aOutput, err := bucket.AbortMultipartUpload(
+    "QingCloudInsight.mov",
+    &qs.AbortMultipartUploadInput{
+        UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
+    },
+)
+
+// Print the error message.
+// Example: QingStor Error: StatusCode 400, Code...
+fmt.Println(err) +``` diff --git a/vendor/github.com/yunify/qingstor-sdk-go/glide.lock b/vendor/github.com/yunify/qingstor-sdk-go/glide.lock new file mode 100644 index 000000000..ad29d5cd3 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/glide.lock @@ -0,0 +1,33 @@ +hash: 2e6b1ed4a2ee0638abc2e819ac3c247eaf80fa0f2053cfc41eecf883054c6032 +updated: 2017-05-22T14:58:58.927797848+08:00 +imports: +- name: github.com/pengsrc/go-shared + version: 454950d6a0782c34427d4f29b46c6bf447256f20 + subpackages: + - check + - convert + - json + - yaml +- name: github.com/Sirupsen/logrus + version: d26492970760ca5d33129d2d799e34be5c4782eb +- name: golang.org/x/sys + version: f3918c30c5c2cb527c0b071a27c35120a6c0719a + repo: https://github.com/golang/sys.git + subpackages: + - unix +- name: gopkg.in/yaml.v2 + version: a5b47d31c556af34a302ce5d659e6fea44d90de0 + repo: https://github.com/go-yaml/yaml.git +testImports: +- name: github.com/davecgh/go-spew + version: 346938d642f2ec3594ed81d874461961cd0faa76 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + subpackages: + - assert diff --git a/vendor/github.com/yunify/qingstor-sdk-go/glide.yaml b/vendor/github.com/yunify/qingstor-sdk-go/glide.yaml new file mode 100644 index 000000000..7503d8c99 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/glide.yaml @@ -0,0 +1,4 @@ +package: github.com/yunify/qingstor-sdk-go +import: +- package: github.com/pengsrc/go-shared + version: v0.0.8 diff --git a/vendor/github.com/yunify/qingstor-sdk-go/logger/logger.go b/vendor/github.com/yunify/qingstor-sdk-go/logger/logger.go new file mode 100644 index 000000000..c04aba2d4 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/logger/logger.go @@ -0,0 +1,119 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +// Package logger provides support for logging to stdout and stderr. +// Log entries will be logged with format: $timestamp $hostname [$pid]: $severity $message. +package logger + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/Sirupsen/logrus" +) + +var instance *logrus.Logger + +// LogFormatter is used to format log entry. +type LogFormatter struct{} + +// Format formats a given log entry, returns byte slice and error. 
+func (c *LogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + level := strings.ToUpper(entry.Level.String()) + if level == "WARNING" { + level = "WARN" + } + if len(level) < 5 { + level = strings.Repeat(" ", 5-len(level)) + level + } + + return []byte(fmt.Sprintf( + "[%s #%d] %s -- : %s\n", + time.Now().Format("2006-01-02T15:04:05.000Z"), + os.Getpid(), + level, + entry.Message)), nil +} + +// SetOutput set the destination for the log output +func SetOutput(out io.Writer) { + instance.Out = out +} + +// CheckLevel checks whether the log level is valid. +func CheckLevel(level string) error { + if _, err := logrus.ParseLevel(level); err != nil { + return fmt.Errorf(`log level not valid: "%s"`, level) + } + return nil +} + +// GetLevel get the log level string. +func GetLevel() string { + return instance.Level.String() +} + +// SetLevel sets the log level. Valid levels are "debug", "info", "warn", "error", and "fatal". +func SetLevel(level string) { + lvl, err := logrus.ParseLevel(level) + if err != nil { + Fatal(fmt.Sprintf(`log level not valid: "%s"`, level)) + } + instance.Level = lvl +} + +// Debug logs a message with severity DEBUG. +func Debug(format string, v ...interface{}) { + output(instance.Debug, format, v...) +} + +// Info logs a message with severity INFO. +func Info(format string, v ...interface{}) { + output(instance.Info, format, v...) +} + +// Warn logs a message with severity WARN. +func Warn(format string, v ...interface{}) { + output(instance.Warn, format, v...) +} + +// Error logs a message with severity ERROR. +func Error(format string, v ...interface{}) { + output(instance.Error, format, v...) +} + +// Fatal logs a message with severity ERROR followed by a call to os.Exit(). +func Fatal(format string, v ...interface{}) { + output(instance.Fatal, format, v...) +} + +func output(origin func(...interface{}), format string, v ...interface{}) { + if len(v) > 0 { + origin(fmt.Sprintf(format, v...)) + } else { + origin(format) + } +} + +func init() { + instance = logrus.New() + instance.Formatter = &LogFormatter{} + instance.Out = os.Stderr + instance.Level = logrus.WarnLevel +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base.go b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base.go new file mode 100644 index 000000000..77609e124 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base.go @@ -0,0 +1,303 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package builder + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + "unicode" + + "github.com/pengsrc/go-shared/convert" + "github.com/pengsrc/go-shared/json" + + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/utils" +) + +// BaseBuilder is the base builder for all services. +type BaseBuilder struct { + parsedURL string + parsedProperties *map[string]string + parsedParams *map[string]string + parsedHeaders *map[string]string + parsedBodyString string + parsedBody io.Reader + + operation *data.Operation + input *reflect.Value +} + +// BuildHTTPRequest builds http request with an operation and an input. +func (b *BaseBuilder) BuildHTTPRequest(o *data.Operation, i *reflect.Value) (*http.Request, error) { + b.operation = o + b.input = i + + _, err := b.parse() + if err != nil { + return nil, err + } + + return b.build() +} + +func (b *BaseBuilder) build() (*http.Request, error) { + httpRequest, err := http.NewRequest(b.operation.RequestMethod, b.parsedURL, b.parsedBody) + if err != nil { + return nil, err + } + + err = b.setupHeaders(httpRequest) + if err != nil { + return nil, err + } + + return httpRequest, nil +} + +func (b *BaseBuilder) parse() (*BaseBuilder, error) { + err := b.parseRequestParamsAndHeaders() + if err != nil { + return b, err + } + err = b.parseRequestBody() + if err != nil { + return b, err + } + err = b.parseRequestProperties() + if err != nil { + return b, err + } + err = b.parseRequestURL() + if err != nil { + return b, err + } + + return b, nil +} + +func (b *BaseBuilder) parseRequestParamsAndHeaders() error { + requestParams := map[string]string{} + requestHeaders := map[string]string{} + maps := map[string](map[string]string){ + "params": requestParams, + "headers": requestHeaders, + } + + b.parsedParams = &requestParams + b.parsedHeaders = &requestHeaders + + if !b.input.IsValid() { + return nil + } + + fields := b.input.Elem() + if !fields.IsValid() { + return nil + } + + for i := 0; i < fields.NumField(); i++ { + tagName := fields.Type().Field(i).Tag.Get("name") + tagLocation := fields.Type().Field(i).Tag.Get("location") + if tagDefault := fields.Type().Field(i).Tag.Get("default"); tagDefault != "" { + maps[tagLocation][tagName] = tagDefault + } + if tagName != "" && tagLocation != "" && maps[tagLocation] != nil { + switch value := fields.Field(i).Interface().(type) { + case *string: + if value != nil { + maps[tagLocation][tagName] = *value + } + case *int: + if value != nil { + maps[tagLocation][tagName] = strconv.Itoa(int(*value)) + } + case *int64: + if value != nil { + maps[tagLocation][tagName] = strconv.FormatInt(int64(*value), 10) + } + case *bool: + case *time.Time: + if value != nil { + formatString := fields.Type().Field(i).Tag.Get("format") + format := "" + switch formatString { + case "RFC 822": + format = convert.RFC822 + case "ISO 8601": + format = convert.ISO8601 + } + maps[tagLocation][tagName] = convert.TimeToString(*value, format) + } + } + } + } + + return nil +} + +func (b *BaseBuilder) parseRequestBody() error { + requestData := map[string]interface{}{} + + if !b.input.IsValid() { + return nil + } + + fields := b.input.Elem() + if !fields.IsValid() { + return nil + } + + for i := 0; i < fields.NumField(); i++ { + location := fields.Type().Field(i).Tag.Get("location") + if location == "elements" { + name := fields.Type().Field(i).Tag.Get("name") + requestData[name] = 
fields.Field(i).Interface() + } + } + + if len(requestData) != 0 { + dataValue, err := json.Encode(requestData, true) + if err != nil { + return err + } + + b.parsedBodyString = string(dataValue) + b.parsedBody = strings.NewReader(b.parsedBodyString) + (*b.parsedHeaders)["Content-Type"] = "application/json" + } else { + value := fields.FieldByName("Body") + if value.IsValid() { + switch value.Interface().(type) { + case string: + if value.String() != "" { + b.parsedBodyString = value.String() + b.parsedBody = strings.NewReader(value.String()) + } + case io.Reader: + if value.Interface().(io.Reader) != nil { + b.parsedBody = value.Interface().(io.Reader) + } + } + } + } + + return nil +} + +func (b *BaseBuilder) parseRequestProperties() error { + propertiesMap := map[string]string{} + b.parsedProperties = &propertiesMap + + if b.operation.Properties != nil { + fields := reflect.ValueOf(b.operation.Properties).Elem() + if fields.IsValid() { + for i := 0; i < fields.NumField(); i++ { + switch value := fields.Field(i).Interface().(type) { + case *string: + if value != nil { + propertiesMap[fields.Type().Field(i).Tag.Get("name")] = *value + } + case *int: + if value != nil { + numberString := strconv.Itoa(int(*value)) + propertiesMap[fields.Type().Field(i).Tag.Get("name")] = numberString + } + } + } + } + } + + return nil +} + +func (b *BaseBuilder) parseRequestURL() error { + return nil +} + +func (b *BaseBuilder) setupHeaders(httpRequest *http.Request) error { + if b.parsedHeaders != nil { + + for headerKey, headerValue := range *b.parsedHeaders { + if headerKey == "X-QS-Fetch-Source" { + // header X-QS-Fetch-Source is a URL to fetch. + // We should first parse this URL. + requestURL, err := url.Parse(headerValue) + if err != nil { + return fmt.Errorf("invalid HTTP header value: %s", headerValue) + } + headerValue = requestURL.String() + } else { + for _, r := range headerValue { + if r > unicode.MaxASCII { + headerValue = utils.URLQueryEscape(headerValue) + break + } + } + } + + httpRequest.Header.Set(headerKey, headerValue) + } + } + + if httpRequest.Header.Get("Content-Length") == "" { + var length int64 + switch body := b.parsedBody.(type) { + case nil: + length = 0 + case io.Seeker: + //start, err := body.Seek(0, io.SeekStart) + start, err := body.Seek(0, 0) + if err != nil { + return err + } + //end, err := body.Seek(0, io.SeekEnd) + end, err := body.Seek(0, 2) + if err != nil { + return err + } + //body.Seek(0, io.SeekStart) + body.Seek(0, 0) + length = end - start + default: + return errors.New("Can not get Content-Length") + } + if length > 0 { + httpRequest.ContentLength = length + httpRequest.Header.Set("Content-Length", strconv.Itoa(int(length))) + } else { + httpRequest.Header.Set("Content-Length", "0") + } + } + length, err := strconv.Atoi(httpRequest.Header.Get("Content-Length")) + if err != nil { + return err + } + httpRequest.ContentLength = int64(length) + + if httpRequest.Header.Get("Date") == "" { + httpRequest.Header.Set("Date", convert.TimeToString(time.Now(), convert.RFC822)) + } + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base_test.go new file mode 100644 index 000000000..d4b3eeaca --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/base_test.go @@ -0,0 +1,125 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. 
+// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package builder + +import ( + "bytes" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +type FakeProperties struct { + A *string `name:"a"` + B *string `name:"b"` + CD *int `name:"c-d"` +} +type FakeInput struct { + ParamA *string `location:"params" name:"a"` + ParamB *string `location:"params" name:"b"` + ParamCD *int `location:"params" name:"c_d" default:"1024"` + HeaderA *string `location:"headers" name:"A"` + HeaderB *time.Time `location:"headers" name:"B" format:"RFC 822"` + HeaderCD *int `location:"headers" name:"C-D"` + ElementA *string `location:"elements" name:"a"` + ElementB *string `location:"elements" name:"b"` + ElementCD *int64 `location:"elements" name:"cd"` + Body *string `localtion:"body"` +} + +func (i *FakeInput) Validate() error { + return nil +} + +func String(v string) *string { + return &v +} + +func Int(v int) *int { + return &v +} + +func Int64(v int64) *int64 { + return &v +} + +func Time(v time.Time) *time.Time { + return &v +} + +func TestBaseBuilder_BuildHTTPRequest(t *testing.T) { + conf, err := config.NewDefault() + assert.Nil(t, err) + + tz, err := time.LoadLocation("Asia/Shanghai") + assert.Nil(t, err) + + builder := BaseBuilder{} + operation := &data.Operation{ + Config: conf, + APIName: "This is API name", + ServiceName: "Base", + Properties: &FakeProperties{ + A: String("property_a"), + B: String("property_b"), + CD: Int(0), + }, + RequestMethod: "GET", + RequestURI: "/hello////world", + StatusCodes: []int{ + 200, + 201, + }, + } + inputValue := reflect.ValueOf(&FakeInput{ + ParamA: String("param_a"), + ParamCD: Int(1024), + HeaderA: String("header_a"), + HeaderB: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, tz)), + ElementA: String("element_a"), + ElementB: String("element_b"), + ElementCD: Int64(0), + Body: String("This is body string"), + }) + httpRequest, err := builder.BuildHTTPRequest(operation, &inputValue) + assert.Nil(t, err) + assert.Equal(t, &map[string]string{ + "a": "property_a", + "b": "property_b", + "c-d": "0", + }, builder.parsedProperties) + assert.Equal(t, &map[string]string{ + "a": "param_a", + "c_d": "1024", + }, builder.parsedParams) + assert.Equal(t, &map[string]string{ + "A": "header_a", + "B": "Thu, 01 Sep 2016 07:30:00 GMT", + "Content-Type": "application/json", + }, builder.parsedHeaders) + assert.NotNil(t, httpRequest.Header.Get("Date")) + assert.Equal(t, "40", httpRequest.Header.Get("Content-Length")) + + buffer := &bytes.Buffer{} + buffer.ReadFrom(httpRequest.Body) + httpRequest.Body.Close() + assert.Equal(t, "{\"a\":\"element_a\",\"b\":\"element_b\",\"cd\":0}", buffer.String()) +} diff --git 
a/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor.go b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor.go new file mode 100644 index 000000000..d17170e5f --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor.go @@ -0,0 +1,167 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package builder + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/url" + "path" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/pengsrc/go-shared/convert" + + "github.com/yunify/qingstor-sdk-go" + "github.com/yunify/qingstor-sdk-go/logger" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/utils" +) + +// QingStorBuilder is the request builder for QingStor service. +type QingStorBuilder struct { + baseBuilder *BaseBuilder +} + +// BuildHTTPRequest builds http request with an operation and an input. +func (qb *QingStorBuilder) BuildHTTPRequest(o *data.Operation, i *reflect.Value) (*http.Request, error) { + qb.baseBuilder = &BaseBuilder{} + qb.baseBuilder.operation = o + qb.baseBuilder.input = i + + _, err := qb.baseBuilder.parse() + if err != nil { + return nil, err + } + err = qb.parseURL() + if err != nil { + return nil, err + } + + httpRequest, err := http.NewRequest(qb.baseBuilder.operation.RequestMethod, + qb.baseBuilder.parsedURL, qb.baseBuilder.parsedBody) + if err != nil { + return nil, err + } + + err = qb.baseBuilder.setupHeaders(httpRequest) + if err != nil { + return nil, err + } + err = qb.setupHeaders(httpRequest) + if err != nil { + return nil, err + } + + logger.Info(fmt.Sprintf( + "Built QingStor request: [%d] %s", + convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822), + httpRequest.URL.String()), + ) + + logger.Info(fmt.Sprintf( + "QingStor request headers: [%d] %s", + convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822), + fmt.Sprint(httpRequest.Header)), + ) + + if qb.baseBuilder.parsedBodyString != "" { + logger.Info(fmt.Sprintf( + "QingStor request body string: [%d] %s", + convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822), + qb.baseBuilder.parsedBodyString), + ) + } + + return httpRequest, nil +} + +func (qb *QingStorBuilder) parseURL() error { + config := qb.baseBuilder.operation.Config + + zone := (*qb.baseBuilder.parsedProperties)["zone"] + port := strconv.Itoa(config.Port) + endpoint := config.Protocol + "://" + config.Host + ":" + port + if zone != "" { + endpoint = config.Protocol + "://" + zone + "." 
+ config.Host + ":" + port + } + + requestURI := qb.baseBuilder.operation.RequestURI + for key, value := range *qb.baseBuilder.parsedProperties { + endpoint = strings.Replace(endpoint, "<"+key+">", utils.URLQueryEscape(value), -1) + requestURI = strings.Replace(requestURI, "<"+key+">", utils.URLQueryEscape(value), -1) + } + requestURI = regexp.MustCompile(`/+`).ReplaceAllString(requestURI, "/") + + requestURL, err := url.Parse(endpoint + requestURI) + if err != nil { + return err + } + + if qb.baseBuilder.parsedParams != nil { + queryValue := requestURL.Query() + for key, value := range *qb.baseBuilder.parsedParams { + queryValue.Set(key, value) + } + requestURL.RawQuery = queryValue.Encode() + } + + qb.baseBuilder.parsedURL = requestURL.String() + + return nil +} + +func (qb *QingStorBuilder) setupHeaders(httpRequest *http.Request) error { + method := httpRequest.Method + if method == "POST" || method == "PUT" || method == "DELETE" { + if httpRequest.Header.Get("Content-Type") == "" { + mimeType := mime.TypeByExtension(path.Ext(httpRequest.URL.Path)) + if mimeType != "" { + httpRequest.Header.Set("Content-Type", mimeType) + } + } + } + + if httpRequest.Header.Get("User-Agent") == "" { + version := fmt.Sprintf(`Go v%s`, strings.Replace(runtime.Version(), "go", "", -1)) + system := fmt.Sprintf(`%s_%s_%s`, runtime.GOOS, runtime.GOARCH, runtime.Compiler) + ua := fmt.Sprintf(`qingstor-sdk-go/%s (%s; %s)`, sdk.Version, version, system) + if qb.baseBuilder.operation.Config.AdditionalUserAgent != "" { + ua = fmt.Sprintf(`%s %s`, ua, qb.baseBuilder.operation.Config.AdditionalUserAgent) + } + httpRequest.Header.Set("User-Agent", ua) + } + + if qb.baseBuilder.operation.APIName == "Delete Multiple Objects" { + buffer := &bytes.Buffer{} + buffer.ReadFrom(httpRequest.Body) + httpRequest.Body = ioutil.NopCloser(bytes.NewReader(buffer.Bytes())) + + md5Value := md5.Sum(buffer.Bytes()) + httpRequest.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5Value[:])) + } + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor_test.go new file mode 100644 index 000000000..ec8cb6e8e --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor_test.go @@ -0,0 +1,83 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package builder + +import ( + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +type ObjectSubServiceProperties struct { + BucketName *string `json:"bucket-name" name:"bucket-name"` + ObjectKey *string `json:"object-key" name:"object-key"` + Zone *string `json:"zone" name:"zone"` +} +type GetObjectInput struct { + IfMatch *string `json:"If-Match" name:"If-Match" location:"headers"` + IfModifiedSince *time.Time `json:"If-Modified-Since" name:"If-Modified-Since" format:"RFC 822" location:"headers"` + IfNoneMatch *string `json:"If-None-Match" name:"If-None-Match" location:"headers"` + IfUnmodifiedSince time.Time `json:"If-Unmodified-Since" name:"If-Unmodified-Since" format:"RFC 822" location:"headers"` + // Specified range of the Object + Range *string `json:"Range" name:"Range" location:"headers"` +} + +func (i *GetObjectInput) Validate() error { + return nil +} + +func TestQingStorBuilder_BuildHTTPRequest(t *testing.T) { + conf, err := config.NewDefault() + assert.Nil(t, err) + conf.Host = "qingstor.dev" + + tz, err := time.LoadLocation("Asia/Shanghai") + assert.Nil(t, err) + + qsBuilder := &QingStorBuilder{} + operation := &data.Operation{ + Config: conf, + APIName: "GET Object", + ServiceName: "QingStor", + Properties: &ObjectSubServiceProperties{ + BucketName: String("test"), + ObjectKey: String("path/to/key.txt"), + Zone: String("beta"), + }, + RequestMethod: "GET", + RequestURI: "//", + StatusCodes: []int{ + 201, + }, + } + inputValue := reflect.ValueOf(&GetObjectInput{ + IfModifiedSince: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, tz)), + Range: String("100-"), + }) + httpRequest, err := qsBuilder.BuildHTTPRequest(operation, &inputValue) + assert.Nil(t, err) + assert.NotNil(t, httpRequest.Header.Get("Date")) + assert.Equal(t, "0", httpRequest.Header.Get("Content-Length")) + assert.Equal(t, "", httpRequest.Header.Get("If-Match")) + assert.Equal(t, "Thu, 01 Sep 2016 07:30:00 GMT", httpRequest.Header.Get("If-Modified-Since")) + assert.Equal(t, "100-", httpRequest.Header.Get("Range")) + assert.Equal(t, "https://beta.qingstor.dev:443/test/path/to/key.txt", httpRequest.URL.String()) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/data/input.go b/vendor/github.com/yunify/qingstor-sdk-go/request/data/input.go new file mode 100644 index 000000000..80fb8aa1e --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/data/input.go @@ -0,0 +1,22 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package data + +// Input defines the interfaces that input should implement. +type Input interface { + Validation +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/data/operation.go b/vendor/github.com/yunify/qingstor-sdk-go/request/data/operation.go new file mode 100644 index 000000000..946fe869e --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/data/operation.go @@ -0,0 +1,35 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package data + +import ( + "github.com/yunify/qingstor-sdk-go/config" +) + +// Operation stores information of an operation. +type Operation struct { + Config *config.Config + Properties interface{} + + APIName string + ServiceName string + + RequestMethod string + RequestURI string + + StatusCodes []int +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/data/validation.go b/vendor/github.com/yunify/qingstor-sdk-go/request/data/validation.go new file mode 100644 index 000000000..f9954b347 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/data/validation.go @@ -0,0 +1,22 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package data + +// Validation defines the validate interface. +type Validation interface { + Validate() error +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/errors/parameters.go b/vendor/github.com/yunify/qingstor-sdk-go/request/errors/parameters.go new file mode 100644 index 000000000..1a4f57d67 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/errors/parameters.go @@ -0,0 +1,53 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. 
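
The data package only defines the contract: an Input is anything with a Validate() error method, and an Operation carries the config, API metadata and expected status codes for one call. Generated input types implement Validate using the error types from request/errors, which are defined just below. A minimal sketch of that convention, using a hypothetical putThingInput type that is not part of the SDK:

    package main

    import (
        "fmt"

        "github.com/yunify/qingstor-sdk-go/request/errors"
    )

    // putThingInput is a hypothetical input type; generated inputs follow the
    // same pattern, with one tagged field per parameter, header or element.
    type putThingInput struct {
        Name *string `location:"elements" name:"name"` // Required
    }

    // Validate implements data.Validation (and therefore data.Input).
    func (i *putThingInput) Validate() error {
        if i.Name == nil {
            return errors.ParameterRequiredError{
                ParameterName: "Name",
                ParentName:    "putThingInput",
            }
        }
        return nil
    }

    func main() {
        var in putThingInput
        fmt.Println(in.Validate()) // "Name" is required in "putThingInput"
    }
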
+// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package errors + +import ( + "fmt" + "strings" +) + +// ParameterRequiredError indicates that the required parameter is missing. +type ParameterRequiredError struct { + ParameterName string + ParentName string +} + +// Error returns the description of ParameterRequiredError. +func (e ParameterRequiredError) Error() string { + return fmt.Sprintf(`"%s" is required in "%s"`, e.ParameterName, e.ParentName) +} + +// ParameterValueNotAllowedError indicates that the parameter value is not allowed. +type ParameterValueNotAllowedError struct { + ParameterName string + ParameterValue string + AllowedValues []string +} + +// Error returns the description of ParameterValueNotAllowedError. +func (e ParameterValueNotAllowedError) Error() string { + allowedValues := []string{} + for _, value := range e.AllowedValues { + allowedValues = append(allowedValues, "\""+value+"\"") + } + return fmt.Sprintf( + `"%s" value "%s" is not allowed, should be one of %s`, + e.ParameterName, + e.ParameterValue, + strings.Join(allowedValues, ", ")) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/errors/qingstor.go b/vendor/github.com/yunify/qingstor-sdk-go/request/errors/qingstor.go new file mode 100644 index 000000000..2e9163b01 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/errors/qingstor.go @@ -0,0 +1,36 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package errors + +import "fmt" + +// QingStorError stores information of an QingStor error response. +type QingStorError struct { + StatusCode int + + Code string `json:"code"` + Message string `json:"message"` + RequestID string `json:"request_id"` + ReferenceURL string `json:"url"` +} + +// Error returns the description of QingStor error response. 
+func (qse QingStorError) Error() string { + return fmt.Sprintf( + "QingStor Error: StatusCode \"%d\", Code \"%s\", Message \"%s\", Request ID \"%s\", Reference URL \"%s\"", + qse.StatusCode, qse.Code, qse.Message, qse.RequestID, qse.ReferenceURL) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/request.go b/vendor/github.com/yunify/qingstor-sdk-go/request/request.go new file mode 100644 index 000000000..f731b0f6d --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/request.go @@ -0,0 +1,232 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package request + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/pengsrc/go-shared/convert" + + "github.com/yunify/qingstor-sdk-go/logger" + "github.com/yunify/qingstor-sdk-go/request/builder" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/signer" + "github.com/yunify/qingstor-sdk-go/request/unpacker" +) + +// A Request can build, sign, send and unpack API request. +type Request struct { + Operation *data.Operation + Input *reflect.Value + Output *reflect.Value + + HTTPRequest *http.Request + HTTPResponse *http.Response +} + +// New create a Request from given Operation, Input and Output. +// It returns a Request. +func New(o *data.Operation, i data.Input, x interface{}) (*Request, error) { + input := reflect.ValueOf(i) + if input.IsValid() && input.Elem().IsValid() { + err := i.Validate() + if err != nil { + return nil, err + } + } + output := reflect.ValueOf(x) + + return &Request{ + Operation: o, + Input: &input, + Output: &output, + }, nil +} + +// Send sends API request. +// It returns error if error occurred. +func (r *Request) Send() error { + err := r.check() + if err != nil { + return err + } + + err = r.build() + if err != nil { + return err + } + + err = r.sign() + if err != nil { + return err + } + + err = r.send() + if err != nil { + return err + } + + err = r.unpack() + if err != nil { + return err + } + + return nil +} + +// Sign sign the API request by setting the authorization header. +// It returns error if error occurred. +func (r *Request) Sign() error { + err := r.check() + if err != nil { + return err + } + + err = r.build() + if err != nil { + return err + } + + err = r.sign() + if err != nil { + return err + } + + return nil +} + +// SignQuery sign the API request by appending query string. +// It returns error if error occurred. 
+func (r *Request) SignQuery(timeoutSeconds int) error { + err := r.check() + if err != nil { + return err + } + + err = r.build() + if err != nil { + return err + } + + err = r.signQuery(int(time.Now().Unix()) + timeoutSeconds) + if err != nil { + return err + } + + return nil +} + +func (r *Request) check() error { + if r.Operation.Config.AccessKeyID == "" { + return errors.New("access key not provided") + } + + if r.Operation.Config.SecretAccessKey == "" { + return errors.New("secret access key not provided") + } + + return nil +} + +func (r *Request) build() error { + b := &builder.QingStorBuilder{} + httpRequest, err := b.BuildHTTPRequest(r.Operation, r.Input) + if err != nil { + return err + } + + r.HTTPRequest = httpRequest + return nil +} + +func (r *Request) sign() error { + s := &signer.QingStorSigner{ + AccessKeyID: r.Operation.Config.AccessKeyID, + SecretAccessKey: r.Operation.Config.SecretAccessKey, + } + err := s.WriteSignature(r.HTTPRequest) + if err != nil { + return err + } + + return nil +} + +func (r *Request) signQuery(expires int) error { + s := &signer.QingStorSigner{ + AccessKeyID: r.Operation.Config.AccessKeyID, + SecretAccessKey: r.Operation.Config.SecretAccessKey, + } + err := s.WriteQuerySignature(r.HTTPRequest, expires) + if err != nil { + return err + } + + return nil +} + +func (r *Request) send() error { + var response *http.Response + var err error + + if r.Operation.Config.Connection == nil { + return errors.New("connection not initialized") + } + + retries := r.Operation.Config.ConnectionRetries + 1 + for { + if retries > 0 { + logger.Info(fmt.Sprintf( + "Sending request: [%d] %s %s", + convert.StringToUnixTimestamp(r.HTTPRequest.Header.Get("Date"), convert.RFC822), + r.Operation.RequestMethod, + r.HTTPRequest.Host, + )) + + response, err = r.Operation.Config.Connection.Do(r.HTTPRequest) + if err == nil { + retries = 0 + } else { + retries-- + time.Sleep(time.Second) + } + } else { + break + } + } + if err != nil { + return err + } + + r.HTTPResponse = response + + return nil +} + +func (r *Request) unpack() error { + u := &unpacker.QingStorUnpacker{} + err := u.UnpackHTTPRequest(r.Operation, r.HTTPResponse, r.Output) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/request_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/request_test.go new file mode 100644 index 000000000..203cabddf --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/request_test.go @@ -0,0 +1,136 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
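
The request package ties the other pieces together: New validates the input, build constructs the *http.Request through builder.QingStorBuilder, sign writes the Authorization header through signer.QingStorSigner, send retries over Config.Connection, and unpack maps the response into the output value. A minimal sketch of driving that pipeline by hand; the fakeProperties and fakeInput types, the bucket and zone names, and the operation fields are hypothetical stand-ins for what the generated service package normally supplies, and Sign is used instead of Send so nothing is put on the wire:

    package main

    import (
        "fmt"

        "github.com/yunify/qingstor-sdk-go/config"
        "github.com/yunify/qingstor-sdk-go/request"
        "github.com/yunify/qingstor-sdk-go/request/data"
    )

    // Hypothetical property and input types; real ones come from the
    // generated service package.
    type fakeProperties struct {
        BucketName *string `name:"bucket-name"`
        Zone       *string `name:"zone"`
    }

    type fakeInput struct {
        Limit *int `location:"params" name:"limit"`
    }

    func (i *fakeInput) Validate() error { return nil }

    func main() {
        conf, err := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
        if err != nil {
            panic(err)
        }

        bucket, zone, limit := "mybucket", "pek3a", 10
        o := &data.Operation{
            Config:        conf,
            APIName:       "GET Bucket (List Objects)",
            ServiceName:   "QingStor",
            Properties:    &fakeProperties{BucketName: &bucket, Zone: &zone},
            RequestMethod: "GET",
            RequestURI:    "/<bucket-name>",
            StatusCodes:   []int{200},
        }

        r, err := request.New(o, &fakeInput{Limit: &limit}, nil)
        if err != nil {
            panic(err)
        }

        // Sign runs check -> build -> sign, so the built and signed request
        // can be inspected without a network connection. Send would also
        // need Config.Connection and would unpack the response.
        if err := r.Sign(); err != nil {
            panic(err)
        }
        fmt.Println(r.HTTPRequest.Method, r.HTTPRequest.URL.String())
        fmt.Println(r.HTTPRequest.Header.Get("Authorization"))
    }

When Send is used instead, an error returned from unpack can be inspected with a type switch on *errors.QingStorError to read Code, Message and RequestID, as the test below does.
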
+// +------------------------------------------------------------------------- + +package request + +import ( + "bytes" + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/logger" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +type SomeActionProperties struct { + A *string `json:"a" name:"a"` + B *string `json:"b" name:"b"` + CD *string `json:"c-d" name:"c-d"` +} + +type SomeActionInput struct { + Date *time.Time `json:"Date" name:"Date" format:"RFC 822" location:"headers"` + IfModifiedSince *time.Time `json:"If-Modified-Since" name:"If-Modified-Since" format:"RFC 822" location:"headers"` + Range *string `json:"Range" name:"Range" location:"headers"` + UploadID *string `json:"upload_id" name:"upload_id" location:"params"` + Count *int `json:"count" name:"count" location:"elements"` +} + +func (s *SomeActionInput) Validate() error { + return nil +} + +type SomeActionOutput struct { + StatusCode *int `location:"statusCode"` + Error *errors.QingStorError + RequestID *string `location:"requestID"` +} + +func String(v string) *string { + return &v +} + +func Int(v int) *int { + return &v +} + +func Time(v time.Time) *time.Time { + return &v +} + +func TestRequest_Send(t *testing.T) { + conf, err := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY") + assert.Nil(t, err) + logger.SetLevel("warn") + + operation := &data.Operation{ + Config: conf, + Properties: &SomeActionProperties{ + A: String("aaa"), + B: String("bbb"), + CD: String("ccc-ddd"), + }, + APIName: "Some Action", + RequestMethod: "GET", + RequestURI: "///", + StatusCodes: []int{ + 200, // OK + 206, // Partial content + 304, // Not modified + 412, // Precondition failed + }, + } + + output := &SomeActionOutput{} + r, err := New(operation, &SomeActionInput{ + Date: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, time.UTC)), + IfModifiedSince: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, time.UTC)), + Range: String("100-"), + UploadID: String("0"), + Count: Int(23), + }, output) + assert.Nil(t, err) + + err = r.build() + assert.Nil(t, err) + + err = r.sign() + assert.Nil(t, err) + + assert.Equal(t, r.HTTPRequest.URL.String(), "https://qingstor.com:443/aaa/bbb/ccc-ddd?upload_id=0") + assert.Equal(t, r.HTTPRequest.Header.Get("Range"), "100-") + assert.Equal(t, r.HTTPRequest.Header.Get("If-Modified-Since"), "Thu, 01 Sep 2016 15:30:00 GMT") + assert.Equal(t, r.HTTPRequest.Header.Get("Content-Length"), "12") + assert.Equal(t, r.HTTPRequest.Header.Get("Authorization"), "QS ACCESS_KEY_ID:pA7G9qo4iQ6YHu7p4fX9Wcg4V9S6Mcgvz7p/0wEdz78=") + + httpResponse := &http.Response{Header: http.Header{}} + httpResponse.StatusCode = 400 + httpResponse.Header.Set("Content-Type", "application/json") + responseString := `{ + "code": "bad_request", + "message": "Invalid argument(s) or invalid argument value(s)", + "request_id": "1e588695254aa08cf7a43f612e6ce14b", + "url": "http://docs.qingcloud.com/object_storage/api/object/get.html" + }` + httpResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte(responseString))) + assert.Nil(t, err) + r.HTTPResponse = httpResponse + + err = r.unpack() + assert.NotNil(t, err) + + switch e := err.(type) { + case *errors.QingStorError: + assert.Equal(t, "bad_request", e.Code) + assert.Equal(t, "1e588695254aa08cf7a43f612e6ce14b", e.RequestID) + } +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor.go 
b/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor.go new file mode 100644 index 000000000..361491276 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor.go @@ -0,0 +1,259 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package signer + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "sort" + "strings" + + "github.com/pengsrc/go-shared/convert" + + "github.com/yunify/qingstor-sdk-go/logger" + "github.com/yunify/qingstor-sdk-go/utils" +) + +// QingStorSigner is the http request signer for QingStor service. +type QingStorSigner struct { + AccessKeyID string + SecretAccessKey string +} + +// WriteSignature calculates signature and write it to http request header. +func (qss *QingStorSigner) WriteSignature(request *http.Request) error { + authorization, err := qss.BuildSignature(request) + if err != nil { + return err + } + + request.Header.Set("Authorization", authorization) + + return nil +} + +// WriteQuerySignature calculates signature and write it to http request url. +func (qss *QingStorSigner) WriteQuerySignature(request *http.Request, expires int) error { + query, err := qss.BuildQuerySignature(request, expires) + if err != nil { + return err + } + + if request.URL.RawQuery != "" { + query = "?" + request.URL.RawQuery + "&" + query + } else { + query = "?" + query + } + + newRequest, err := http.NewRequest(request.Method, + request.URL.Scheme+"://"+request.URL.Host+utils.URLQueryEscape(request.URL.Path)+query, nil) + if err != nil { + return err + } + request.URL = newRequest.URL + + return nil +} + +// BuildSignature calculates the signature string. +func (qss *QingStorSigner) BuildSignature(request *http.Request) (string, error) { + stringToSign, err := qss.BuildStringToSign(request) + if err != nil { + return "", err + } + + h := hmac.New(sha256.New, []byte(qss.SecretAccessKey)) + h.Write([]byte(stringToSign)) + + signature := strings.TrimSpace(base64.StdEncoding.EncodeToString(h.Sum(nil))) + authorization := "QS " + qss.AccessKeyID + ":" + signature + + logger.Debug(fmt.Sprintf( + "QingStor authorization: [%d] %s", + convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822), + authorization), + ) + + return authorization, nil +} + +// BuildQuerySignature calculates the signature string for query. 
+func (qss *QingStorSigner) BuildQuerySignature(request *http.Request, expires int) (string, error) { + stringToSign, err := qss.BuildQueryStringToSign(request, expires) + if err != nil { + return "", err + } + + h := hmac.New(sha256.New, []byte(qss.SecretAccessKey)) + h.Write([]byte(stringToSign)) + + signature := strings.TrimSpace(base64.StdEncoding.EncodeToString(h.Sum(nil))) + signature = utils.URLQueryEscape(signature) + query := fmt.Sprintf( + "access_key_id=%s&expires=%d&signature=%s", + qss.AccessKeyID, expires, signature, + ) + + logger.Debug(fmt.Sprintf( + "QingStor query signature: [%d] %s", + convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822), + query, + )) + + return query, nil +} + +// BuildStringToSign build the string to sign. +func (qss *QingStorSigner) BuildStringToSign(request *http.Request) (string, error) { + stringToSign := fmt.Sprintf( + "%s\n%s\n%s\n%s\n", + request.Method, + request.Header.Get("Content-MD5"), + request.Header.Get("Content-Type"), + request.Header.Get("Date"), + ) + + stringToSign += qss.buildCanonicalizedHeaders(request) + canonicalizedResource, err := qss.buildCanonicalizedResource(request) + if err != nil { + return "", err + } + stringToSign += canonicalizedResource + + logger.Debug(fmt.Sprintf( + "QingStor string to sign: [%d] %s", + convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822), + stringToSign, + )) + + return stringToSign, nil +} + +// BuildQueryStringToSign build the string to sign for query. +func (qss *QingStorSigner) BuildQueryStringToSign(request *http.Request, expires int) (string, error) { + stringToSign := fmt.Sprintf( + "%s\n%s\n%s\n%d\n", + request.Method, + request.Header.Get("Content-MD5"), + request.Header.Get("Content-Type"), + expires, + ) + + stringToSign += qss.buildCanonicalizedHeaders(request) + canonicalizedResource, err := qss.buildCanonicalizedResource(request) + if err != nil { + return "", err + } + stringToSign += canonicalizedResource + + logger.Debug(fmt.Sprintf( + "QingStor query string to sign: [%d] %s", + convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822), + stringToSign, + )) + + return stringToSign, nil +} + +func (qss *QingStorSigner) buildCanonicalizedHeaders(request *http.Request) string { + keys := []string{} + for key := range request.Header { + if strings.HasPrefix(strings.ToLower(key), "x-qs-") { + keys = append(keys, strings.TrimSpace(strings.ToLower(key))) + } + } + + sort.Strings(keys) + + canonicalizedHeaders := "" + for _, key := range keys { + canonicalizedHeaders += key + ":" + strings.TrimSpace(request.Header.Get(key)) + "\n" + } + + return canonicalizedHeaders +} + +func (qss *QingStorSigner) buildCanonicalizedResource(request *http.Request) (string, error) { + path := utils.URLQueryEscape(request.URL.Path) + query := request.URL.Query() + + keys := []string{} + for key := range query { + keys = append(keys, key) + } + + sort.Strings(keys) + + parts := []string{} + for _, key := range keys { + values := query[key] + if qss.paramsToSign(key) { + if len(values) > 0 { + if values[0] != "" { + value := strings.TrimSpace(strings.Join(values, "")) + value, err := utils.URLQueryUnescape(value) + if err != nil { + return "", err + } + parts = append(parts, key+"="+value) + } else { + parts = append(parts, key) + } + } else { + parts = append(parts, key) + } + } + } + + joinedParts := strings.Join(parts, "&") + if joinedParts != "" { + path = path + "?" 
+ joinedParts + } + + logger.Debug(fmt.Sprintf( + "QingStor canonicalized resource: [%d] %s", + convert.StringToUnixTimestamp(request.Header.Get("Date"), convert.RFC822), + path, + )) + + return path, nil +} + +func (qss *QingStorSigner) paramsToSign(key string) bool { + keysMap := map[string]bool{ + "acl": true, + "cors": true, + "delete": true, + "mirror": true, + "part_number": true, + "policy": true, + "stats": true, + "upload_id": true, + "uploads": true, + "response-expires": true, + "response-cache-control": true, + "response-content-type": true, + "response-content-language": true, + "response-content-encoding": true, + "response-content-disposition": true, + } + + return keysMap[key] +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor_test.go new file mode 100644 index 000000000..2d159afb7 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/signer/qingstor_test.go @@ -0,0 +1,85 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
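
QingStorSigner exposes the intermediate steps as well as the final result, which makes it easy to see exactly what gets signed: BuildStringToSign concatenates the method, Content-MD5, Content-Type and Date headers, the sorted x-qs-* headers and the canonicalized resource, and WriteSignature computes the HMAC-SHA256 of that string with the secret key and sets the "Authorization: QS <key-id>:<signature>" header. A minimal sketch with made-up credentials and a hypothetical bucket URL:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/yunify/qingstor-sdk-go/request/signer"
    )

    func main() {
        // Hypothetical request against a bucket ACL endpoint.
        req, err := http.NewRequest("GET", "https://pek3a.qingstor.com/mybucket?acl", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Date", "Thu, 01 Sep 2016 07:30:00 GMT")
        req.Header.Set("X-QS-Test", "value")

        s := &signer.QingStorSigner{
            AccessKeyID:     "EXAMPLE_ACCESS_KEY_ID",
            SecretAccessKey: "EXAMPLE_SECRET_ACCESS_KEY",
        }

        // The canonicalized resource here is "/mybucket?acl", since "acl" is
        // one of the whitelisted query parameters in paramsToSign.
        stringToSign, err := s.BuildStringToSign(req)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", stringToSign)

        // WriteSignature signs that string and sets the Authorization header.
        if err := s.WriteSignature(req); err != nil {
            panic(err)
        }
        fmt.Println(req.Header.Get("Authorization"))
    }
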
+// +------------------------------------------------------------------------- + +package signer + +import ( + "net/http" + "testing" + "time" + + "github.com/pengsrc/go-shared/convert" + + "github.com/stretchr/testify/assert" +) + +func TestQingStorSignerWriteSignature(t *testing.T) { + url := "https://qingstor.com/?acl&upload_id=fde133b5f6d932cd9c79bac3c7318da1&part_number=0&other=abc" + httpRequest, err := http.NewRequest("GET", url, nil) + httpRequest.Header.Set("Date", convert.TimeToString(time.Time{}, convert.RFC822)) + httpRequest.Header.Set("X-QS-Test-2", "Test 2") + httpRequest.Header.Set("X-QS-Test-1", "Test 1") + assert.Nil(t, err) + + s := QingStorSigner{ + AccessKeyID: "ENV_ACCESS_KEY_ID", + SecretAccessKey: "ENV_SECRET_ACCESS_KEY", + } + + err = s.WriteSignature(httpRequest) + assert.Nil(t, err) + + signature := "QS ENV_ACCESS_KEY_ID:bvglZF9iMOv1RaCTxPYWxexmt1UN2m5WKngYnhDEp2c=" + assert.Equal(t, signature, httpRequest.Header.Get("Authorization")) +} + +func TestQingStorSignerWriteSignatureChinese(t *testing.T) { + url := "https://zone.qingstor.com/bucket-name/中文" + httpRequest, err := http.NewRequest("GET", url, nil) + httpRequest.Header.Set("Date", convert.TimeToString(time.Time{}, convert.RFC822)) + assert.Nil(t, err) + + s := QingStorSigner{ + AccessKeyID: "ENV_ACCESS_KEY_ID", + SecretAccessKey: "ENV_SECRET_ACCESS_KEY", + } + + err = s.WriteSignature(httpRequest) + assert.Nil(t, err) + + signature := "QS ENV_ACCESS_KEY_ID:XsTXX50kzqBf92zLG1aIUIJmZ0hqIHoaHgkumwnV3fs=" + assert.Equal(t, signature, httpRequest.Header.Get("Authorization")) +} + +func TestQingStorSignerWriteQuerySignature(t *testing.T) { + url := "https://qingstor.com/?acl&upload_id=fde133b5f6d932cd9c79bac3c7318da1&part_number=0" + httpRequest, err := http.NewRequest("GET", url, nil) + httpRequest.Header.Set("Date", convert.TimeToString(time.Time{}, convert.RFC822)) + httpRequest.Header.Set("X-QS-Test-2", "Test 2") + httpRequest.Header.Set("X-QS-Test-1", "Test 1") + assert.Nil(t, err) + + s := QingStorSigner{ + AccessKeyID: "ENV_ACCESS_KEY_ID", + SecretAccessKey: "ENV_SECRET_ACCESS_KEY", + } + + err = s.WriteQuerySignature(httpRequest, 3600) + assert.Nil(t, err) + + targetURL := "https://qingstor.com/?acl&upload_id=fde133b5f6d932cd9c79bac3c7318da1&part_number=0&access_key_id=ENV_ACCESS_KEY_ID&expires=3600&signature=GRL3p3NOgHR9CQygASvyo344vdnO1hFke6ZvQ5mDVHM=" + assert.Equal(t, httpRequest.URL.String(), targetURL) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base.go b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base.go new file mode 100644 index 000000000..18920512d --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base.go @@ -0,0 +1,201 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package unpacker + +import ( + "bytes" + "fmt" + "net/http" + "reflect" + "strconv" + "time" + + "github.com/pengsrc/go-shared/convert" + "github.com/pengsrc/go-shared/json" + + "github.com/yunify/qingstor-sdk-go/logger" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +// BaseUnpacker is the base unpacker for all services. +type BaseUnpacker struct { + operation *data.Operation + httpResponse *http.Response + output *reflect.Value +} + +// UnpackHTTPRequest unpacks http response with an operation and an output. +func (b *BaseUnpacker) UnpackHTTPRequest(o *data.Operation, r *http.Response, x *reflect.Value) error { + b.operation = o + b.httpResponse = r + b.output = x + + err := b.exposeStatusCode() + if err != nil { + return err + } + err = b.parseResponseHeaders() + if err != nil { + return err + } + err = b.parseResponseBody() + if err != nil { + return err + } + err = b.parseResponseElements() + if err != nil { + return err + } + + return nil +} + +func (b *BaseUnpacker) exposeStatusCode() error { + value := b.output.Elem().FieldByName("StatusCode") + if value.IsValid() { + switch value.Interface().(type) { + case *int: + logger.Info(fmt.Sprintf( + "QingStor response status code: [%d] %d", + convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822), + b.httpResponse.StatusCode, + )) + value.Set(reflect.ValueOf(&b.httpResponse.StatusCode)) + } + } + + return nil +} + +func (b *BaseUnpacker) parseResponseHeaders() error { + logger.Info(fmt.Sprintf( + "QingStor response headers: [%d] %s", + convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822), + fmt.Sprint(b.httpResponse.Header), + )) + + if b.isResponseRight() { + fields := b.output.Elem() + for i := 0; i < fields.NumField(); i++ { + field := fields.Field(i) + fieldTagName := fields.Type().Field(i).Tag.Get("name") + fieldTagLocation := fields.Type().Field(i).Tag.Get("location") + fieldStringValue := b.httpResponse.Header.Get(fieldTagName) + + if fieldTagName != "" && fieldTagLocation == "headers" { + switch field.Interface().(type) { + case *string: + field.Set(reflect.ValueOf(&fieldStringValue)) + case *int: + intValue, err := strconv.Atoi(fieldStringValue) + if err != nil { + return err + } + field.Set(reflect.ValueOf(&intValue)) + case *int64: + int64Value, err := strconv.ParseInt(fieldStringValue, 10, 64) + if err != nil { + return err + } + field.Set(reflect.ValueOf(&int64Value)) + case *bool: + case *time.Time: + formatString := fields.Type().Field(i).Tag.Get("format") + format := "" + switch formatString { + case "RFC 822": + format = convert.RFC822 + case "ISO 8601": + format = convert.ISO8601 + } + timeValue, err := convert.StringToTime(fieldStringValue, format) + if err != nil { + return err + } + field.Set(reflect.ValueOf(&timeValue)) + } + } + } + } + + return nil +} + +func (b *BaseUnpacker) parseResponseBody() error { + if b.isResponseRight() { + value := b.output.Elem().FieldByName("Body") + if value.IsValid() { + switch value.Type().String() { + case "string": + buffer := &bytes.Buffer{} + buffer.ReadFrom(b.httpResponse.Body) + b.httpResponse.Body.Close() + + logger.Info(fmt.Sprintf( + "QingStor response body string: [%d] %s", + convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822), + string(buffer.Bytes()), + )) + + value.SetString(string(buffer.Bytes())) + case "io.ReadCloser": + value.Set(reflect.ValueOf(b.httpResponse.Body)) + } + } + } + + return nil +} 
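
BaseUnpacker is driven entirely by the struct tags on the output type: a *int field tagged location:"statusCode" receives the HTTP status, location:"headers" fields are converted from the header string according to their Go type and optional format tag, a Body field would receive the raw body, and, as parseResponseElements below shows, a JSON body is decoded into the location:"elements" fields. A minimal sketch, assuming a hypothetical fakeOutput type and made-up response values:

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
        "reflect"
        "time"

        "github.com/yunify/qingstor-sdk-go/request/data"
        "github.com/yunify/qingstor-sdk-go/request/unpacker"
    )

    // Hypothetical output type; generated outputs in the service package
    // follow the same tagging convention.
    type fakeOutput struct {
        StatusCode   *int       `location:"statusCode"`
        ETag         *string    `location:"headers" name:"ETag"`
        LastModified *time.Time `location:"headers" name:"Last-Modified" format:"RFC 822"`
        Count        *int       `location:"elements" json:"count" name:"count"`
    }

    func main() {
        resp := &http.Response{StatusCode: 200, Header: http.Header{}}
        resp.Header.Set("Content-Type", "application/json")
        resp.Header.Set("ETag", `"d41d8cd98f00b204e9800998ecf8427e"`)
        resp.Header.Set("Last-Modified", "Thu, 01 Sep 2016 07:30:00 GMT")
        resp.Body = ioutil.NopCloser(bytes.NewReader([]byte(`{"count": 2}`)))

        out := &fakeOutput{}
        outValue := reflect.ValueOf(out)

        // An empty Operation means "expect 200", per isResponseRight.
        u := unpacker.BaseUnpacker{}
        if err := u.UnpackHTTPRequest(&data.Operation{}, resp, &outValue); err != nil {
            panic(err)
        }

        fmt.Println(*out.StatusCode)        // 200
        fmt.Println(*out.ETag)              // "d41d8cd98f00b204e9800998ecf8427e"
        fmt.Println(out.LastModified.UTC()) // 2016-09-01 07:30:00 +0000 UTC
        fmt.Println(*out.Count)             // 2
    }
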
+ +func (b *BaseUnpacker) parseResponseElements() error { + if b.isResponseRight() { + if b.httpResponse.Header.Get("Content-Type") == "application/json" { + buffer := &bytes.Buffer{} + buffer.ReadFrom(b.httpResponse.Body) + b.httpResponse.Body.Close() + + logger.Info(fmt.Sprintf( + "QingStor response body string: [%d] %s", + convert.StringToUnixTimestamp(b.httpResponse.Header.Get("Date"), convert.RFC822), + string(buffer.Bytes()), + )) + + _, err := json.Decode(buffer.Bytes(), b.output.Interface()) + if err != nil { + return err + } + } + } + + return nil +} + +func (b *BaseUnpacker) isResponseRight() bool { + rightStatusCodes := b.operation.StatusCodes + if len(rightStatusCodes) == 0 { + rightStatusCodes = append(rightStatusCodes, 200) + } + + flag := false + for _, statusCode := range rightStatusCodes { + if statusCode == b.httpResponse.StatusCode { + flag = true + } + } + + return flag +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base_test.go new file mode 100644 index 000000000..7fa5f25a8 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/base_test.go @@ -0,0 +1,85 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package unpacker + +import ( + "bytes" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +func TestBaseUnpacker_UnpackHTTPRequest(t *testing.T) { + type FakeOutput struct { + StatusCode *int + + A *string `location:"elements" json:"a" name:"a"` + B *string `location:"elements" json:"b" name:"b"` + CD *int `location:"elements" json:"cd" name:"cd"` + EF *int64 `location:"elements" json:"ef" name:"ef"` + } + + httpResponse := &http.Response{Header: http.Header{}} + httpResponse.StatusCode = 200 + httpResponse.Header.Set("Content-Type", "application/json") + responseString := `{"a": "el_a", "b": "el_b", "cd": 1024, "ef": 2048}` + httpResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte(responseString))) + + output := &FakeOutput{} + outputValue := reflect.ValueOf(output) + unpacker := BaseUnpacker{} + err := unpacker.UnpackHTTPRequest(&data.Operation{}, httpResponse, &outputValue) + assert.Nil(t, err) + assert.Equal(t, 200, IntValue(output.StatusCode)) + assert.Equal(t, "el_a", StringValue(output.A)) + assert.Equal(t, "el_b", StringValue(output.B)) + assert.Equal(t, 1024, IntValue(output.CD)) + assert.Equal(t, int64(2048), Int64Value(output.EF)) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor.go b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor.go new file mode 100644 index 000000000..6d4c8e768 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor.go @@ -0,0 +1,72 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package unpacker + +import ( + "bytes" + "net/http" + "reflect" + + "github.com/pengsrc/go-shared/json" + + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +// QingStorUnpacker is the response unpacker for QingStor service. +type QingStorUnpacker struct { + baseUnpacker *BaseUnpacker +} + +// UnpackHTTPRequest unpack the http response with an operation, http response and an output. 
+func (qu *QingStorUnpacker) UnpackHTTPRequest(o *data.Operation, r *http.Response, x *reflect.Value) error { + qu.baseUnpacker = &BaseUnpacker{} + err := qu.baseUnpacker.UnpackHTTPRequest(o, r, x) + if err != nil { + return err + } + + err = qu.parseError() + if err != nil { + return err + } + + return nil +} + +func (qu *QingStorUnpacker) parseError() error { + if !qu.baseUnpacker.isResponseRight() { + if qu.baseUnpacker.httpResponse.Header.Get("Content-Type") == "application/json" { + buffer := &bytes.Buffer{} + buffer.ReadFrom(qu.baseUnpacker.httpResponse.Body) + qu.baseUnpacker.httpResponse.Body.Close() + + qsError := &errors.QingStorError{} + if buffer.Len() > 0 { + _, err := json.Decode(buffer.Bytes(), qsError) + if err != nil { + return err + } + } + qsError.StatusCode = qu.baseUnpacker.httpResponse.StatusCode + + return qsError + } + } + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor_test.go b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor_test.go new file mode 100644 index 000000000..d11266522 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/request/unpacker/qingstor_test.go @@ -0,0 +1,124 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package unpacker + +import ( + "bytes" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +func TestQingStorUnpacker_UnpackHTTPRequest(t *testing.T) { + type Bucket struct { + // Created time of the Bucket + Created *time.Time `json:"created" name:"created" format:"RFC 822"` + // QingCloud Zone ID + Location *string `json:"location" name:"location"` + // Bucket name + Name *string `json:"name" name:"name"` + // URL to access the Bucket + URL *string `json:"url" name:"url"` + } + + type ListBucketsOutput struct { + StatusCode *int `location:"statusCode"` + Error *errors.QingStorError + RequestID *string `location:"requestID"` + + XTestHeader *string `json:"X-Test-Header" name:"X-Test-Header" location:"headers"` + XTestTime *time.Time `json:"X-Test-Time" name:"X-Test-Time" format:"RFC 822" location:"headers"` + + // Buckets information + Buckets []*Bucket `json:"buckets" name:"buckets"` + // Bucket count + Count *int `json:"count" name:"count"` + } + + httpResponse := &http.Response{Header: http.Header{ + "X-Test-Header": []string{"test-header"}, + "X-Test-Time": []string{"Thu, 01 Sep 2016 07:30:00 GMT"}, + }} + httpResponse.StatusCode = 200 + httpResponse.Header.Set("Content-Type", "application/json") + responseString := `{ + "count": 2, + "buckets": [ + { + "name": "test-bucket", + "location": "pek3a", + "url": "https://test-bucket.pek3a.qingstor.com", + "created": "2015-07-11T04:45:57Z" + }, + { + "name": "test-photos", + "location": "pek3a", + "url": "https://test-photos.pek3a.qingstor.com", + "created": "2015-07-12T09:40:32Z" + } + ] + }` + httpResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte(responseString))) + + output := &ListBucketsOutput{} + outputValue := reflect.ValueOf(output) + unpacker := QingStorUnpacker{} + err := unpacker.UnpackHTTPRequest(&data.Operation{}, httpResponse, &outputValue) + assert.Nil(t, err) + assert.Equal(t, "test-header", StringValue(output.XTestHeader)) + assert.Equal(t, time.Date(2016, 9, 1, 7, 30, 0, 0, time.UTC), TimeValue(output.XTestTime)) + assert.Equal(t, 2, IntValue(output.Count)) + assert.Equal(t, "test-bucket", StringValue(output.Buckets[0].Name)) + assert.Equal(t, "pek3a", StringValue(output.Buckets[0].Location)) + assert.Equal(t, time.Date(2015, 7, 12, 9, 40, 32, 0, time.UTC), TimeValue(output.Buckets[1].Created)) +} + +func TestQingStorUnpacker_UnpackHTTPRequestWithError(t *testing.T) { + type ListBucketsOutput struct { + StatusCode *int `location:"statusCode"` + Error *errors.QingStorError + RequestID *string `location:"requestID"` + } + + httpResponse := &http.Response{Header: http.Header{}} + httpResponse.StatusCode = 400 + httpResponse.Header.Set("Content-Type", "application/json") + responseString := `{ + "code": "bad_request", + "message": "Invalid argument(s) or invalid argument value(s)", + "request_id": "aa08cf7a43f611e5886952542e6ce14b", + "url": "http://docs.qingcloud.com/object_storage/api/bucket/get.html" + }` + httpResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte(responseString))) + + output := &ListBucketsOutput{} + outputValue := reflect.ValueOf(output) + unpacker := QingStorUnpacker{} + err := unpacker.UnpackHTTPRequest(&data.Operation{}, httpResponse, &outputValue) + assert.NotNil(t, err) + switch e := err.(type) { + case *errors.QingStorError: + assert.Equal(t, 
"bad_request", e.Code) + assert.Equal(t, "aa08cf7a43f611e5886952542e6ce14b", e.RequestID) + } +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/bucket.go b/vendor/github.com/yunify/qingstor-sdk-go/service/bucket.go new file mode 100644 index 000000000..addca536f --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/bucket.go @@ -0,0 +1,1218 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package service + +import ( + "fmt" + "io" + "time" + + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +var _ fmt.State +var _ io.Reader +var _ time.Time +var _ config.Config + +// Bucket presents bucket. +type Bucket struct { + Config *config.Config + Properties *Properties +} + +// Bucket initializes a new bucket. +func (s *Service) Bucket(bucketName string, zone string) (*Bucket, error) { + properties := &Properties{ + BucketName: &bucketName, + Zone: &zone, + } + + return &Bucket{Config: s.Config, Properties: properties}, nil +} + +// Delete does Delete a bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/delete.html +func (s *Bucket) Delete() (*DeleteBucketOutput, error) { + r, x, err := s.DeleteRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeleteRequest creates request and output object of DeleteBucket. +func (s *Bucket) DeleteRequest() (*request.Request, *DeleteBucketOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "DELETE Bucket", + RequestMethod: "DELETE", + RequestURI: "/", + StatusCodes: []int{ + 204, // Bucket deleted + }, + } + + x := &DeleteBucketOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteBucketOutput presents output for DeleteBucket. +type DeleteBucketOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// DeleteCORS does Delete CORS information of the bucket. 
+// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/cors/delete_cors.html +func (s *Bucket) DeleteCORS() (*DeleteBucketCORSOutput, error) { + r, x, err := s.DeleteCORSRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeleteCORSRequest creates request and output object of DeleteBucketCORS. +func (s *Bucket) DeleteCORSRequest() (*request.Request, *DeleteBucketCORSOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "DELETE Bucket CORS", + RequestMethod: "DELETE", + RequestURI: "/?cors", + StatusCodes: []int{ + 204, // OK + }, + } + + x := &DeleteBucketCORSOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteBucketCORSOutput presents output for DeleteBucketCORS. +type DeleteBucketCORSOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// DeleteExternalMirror does Delete external mirror of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/external_mirror/delete_external_mirror.html +func (s *Bucket) DeleteExternalMirror() (*DeleteBucketExternalMirrorOutput, error) { + r, x, err := s.DeleteExternalMirrorRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeleteExternalMirrorRequest creates request and output object of DeleteBucketExternalMirror. +func (s *Bucket) DeleteExternalMirrorRequest() (*request.Request, *DeleteBucketExternalMirrorOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "DELETE Bucket External Mirror", + RequestMethod: "DELETE", + RequestURI: "/?mirror", + StatusCodes: []int{ + 204, // No content + }, + } + + x := &DeleteBucketExternalMirrorOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteBucketExternalMirrorOutput presents output for DeleteBucketExternalMirror. +type DeleteBucketExternalMirrorOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// DeletePolicy does Delete policy information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/policy/delete_policy.html +func (s *Bucket) DeletePolicy() (*DeleteBucketPolicyOutput, error) { + r, x, err := s.DeletePolicyRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeletePolicyRequest creates request and output object of DeleteBucketPolicy. +func (s *Bucket) DeletePolicyRequest() (*request.Request, *DeleteBucketPolicyOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "DELETE Bucket Policy", + RequestMethod: "DELETE", + RequestURI: "/?policy", + StatusCodes: []int{ + 204, // No content + }, + } + + x := &DeleteBucketPolicyOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteBucketPolicyOutput presents output for DeleteBucketPolicy. 
+type DeleteBucketPolicyOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// DeleteMultipleObjects does Delete multiple objects from the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/delete_multiple.html +func (s *Bucket) DeleteMultipleObjects(input *DeleteMultipleObjectsInput) (*DeleteMultipleObjectsOutput, error) { + r, x, err := s.DeleteMultipleObjectsRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeleteMultipleObjectsRequest creates request and output object of DeleteMultipleObjects. +func (s *Bucket) DeleteMultipleObjectsRequest(input *DeleteMultipleObjectsInput) (*request.Request, *DeleteMultipleObjectsOutput, error) { + + if input == nil { + input = &DeleteMultipleObjectsInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "Delete Multiple Objects", + RequestMethod: "POST", + RequestURI: "/?delete", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &DeleteMultipleObjectsOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteMultipleObjectsInput presents input for DeleteMultipleObjects. +type DeleteMultipleObjectsInput struct { + + // A list of keys to delete + Objects []*KeyType `json:"objects" name:"objects" location:"elements"` // Required + // Whether to return the list of deleted objects + Quiet *bool `json:"quiet,omitempty" name:"quiet" location:"elements"` +} + +// Validate validates the input for DeleteMultipleObjects. +func (v *DeleteMultipleObjectsInput) Validate() error { + + if len(v.Objects) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "Objects", + ParentName: "DeleteMultipleObjectsInput", + } + } + + if len(v.Objects) > 0 { + for _, property := range v.Objects { + if err := property.Validate(); err != nil { + return err + } + } + } + + return nil +} + +// DeleteMultipleObjectsOutput presents output for DeleteMultipleObjects. +type DeleteMultipleObjectsOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // List of deleted objects + Deleted []*KeyType `json:"deleted,omitempty" name:"deleted" location:"elements"` + // Error messages + Errors []*KeyDeleteErrorType `json:"errors,omitempty" name:"errors" location:"elements"` +} + +// GetACL does Get ACL information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/get_acl.html +func (s *Bucket) GetACL() (*GetBucketACLOutput, error) { + r, x, err := s.GetACLRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetACLRequest creates request and output object of GetBucketACL. +func (s *Bucket) GetACLRequest() (*request.Request, *GetBucketACLOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket ACL", + RequestMethod: "GET", + RequestURI: "/?acl", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &GetBucketACLOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetBucketACLOutput presents output for GetBucketACL. 
+type GetBucketACLOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Bucket ACL rules + ACL []*ACLType `json:"acl,omitempty" name:"acl" location:"elements"` + // Bucket owner + Owner *OwnerType `json:"owner,omitempty" name:"owner" location:"elements"` +} + +// GetCORS does Get CORS information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/cors/get_cors.html +func (s *Bucket) GetCORS() (*GetBucketCORSOutput, error) { + r, x, err := s.GetCORSRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetCORSRequest creates request and output object of GetBucketCORS. +func (s *Bucket) GetCORSRequest() (*request.Request, *GetBucketCORSOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket CORS", + RequestMethod: "GET", + RequestURI: "/?cors", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &GetBucketCORSOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetBucketCORSOutput presents output for GetBucketCORS. +type GetBucketCORSOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Bucket CORS rules + CORSRules []*CORSRuleType `json:"cors_rules,omitempty" name:"cors_rules" location:"elements"` +} + +// GetExternalMirror does Get external mirror of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/external_mirror/get_external_mirror.html +func (s *Bucket) GetExternalMirror() (*GetBucketExternalMirrorOutput, error) { + r, x, err := s.GetExternalMirrorRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetExternalMirrorRequest creates request and output object of GetBucketExternalMirror. +func (s *Bucket) GetExternalMirrorRequest() (*request.Request, *GetBucketExternalMirrorOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket External Mirror", + RequestMethod: "GET", + RequestURI: "/?mirror", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &GetBucketExternalMirrorOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetBucketExternalMirrorOutput presents output for GetBucketExternalMirror. +type GetBucketExternalMirrorOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Source site url + SourceSite *string `json:"source_site,omitempty" name:"source_site" location:"elements"` +} + +// GetPolicy does Get policy information of the bucket. +// Documentation URL: https://https://docs.qingcloud.com/qingstor/api/bucket/policy/get_policy.html +func (s *Bucket) GetPolicy() (*GetBucketPolicyOutput, error) { + r, x, err := s.GetPolicyRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetPolicyRequest creates request and output object of GetBucketPolicy. 
+func (s *Bucket) GetPolicyRequest() (*request.Request, *GetBucketPolicyOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket Policy", + RequestMethod: "GET", + RequestURI: "/?policy", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &GetBucketPolicyOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetBucketPolicyOutput presents output for GetBucketPolicy. +type GetBucketPolicyOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Bucket policy statement + Statement []*StatementType `json:"statement,omitempty" name:"statement" location:"elements"` +} + +// GetStatistics does Get statistics information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/get_stats.html +func (s *Bucket) GetStatistics() (*GetBucketStatisticsOutput, error) { + r, x, err := s.GetStatisticsRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetStatisticsRequest creates request and output object of GetBucketStatistics. +func (s *Bucket) GetStatisticsRequest() (*request.Request, *GetBucketStatisticsOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket Statistics", + RequestMethod: "GET", + RequestURI: "/?stats", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &GetBucketStatisticsOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetBucketStatisticsOutput presents output for GetBucketStatistics. +type GetBucketStatisticsOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Objects count in the bucket + Count *int64 `json:"count,omitempty" name:"count" location:"elements"` + // Bucket created time + Created *time.Time `json:"created,omitempty" name:"created" format:"ISO 8601" location:"elements"` + // QingCloud Zone ID + Location *string `json:"location,omitempty" name:"location" location:"elements"` + // Bucket name + Name *string `json:"name,omitempty" name:"name" location:"elements"` + // Bucket storage size + Size *int64 `json:"size,omitempty" name:"size" location:"elements"` + // Bucket status + // Status's available values: active, suspended + Status *string `json:"status,omitempty" name:"status" location:"elements"` + // URL to access the bucket + URL *string `json:"url,omitempty" name:"url" location:"elements"` +} + +// Head does Check whether the bucket exists and available. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/head.html +func (s *Bucket) Head() (*HeadBucketOutput, error) { + r, x, err := s.HeadRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// HeadRequest creates request and output object of HeadBucket. 
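The read-only bucket calls (GetACL, GetCORS, GetExternalMirror, GetPolicy, GetStatistics, Head) take no input and return pointer-heavy outputs. A hedged sketch, not part of the vendored file, of probing a bucket before using it; the pointer fields are read with nil checks or the *Value helpers from convert_types.go below.

package example

import (
	"fmt"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

// describeBucket checks that the bucket is reachable, then prints a few of
// the statistics fields. All output fields are pointers.
func describeBucket(bucket *qs.Bucket) error {
	if _, err := bucket.Head(); err != nil {
		return fmt.Errorf("bucket not available: %v", err)
	}

	stats, err := bucket.GetStatistics()
	if err != nil {
		return err
	}
	fmt.Printf("bucket %s in zone %s, status %s\n",
		qs.StringValue(stats.Name), qs.StringValue(stats.Location), qs.StringValue(stats.Status))
	if stats.Count != nil && stats.Size != nil {
		fmt.Printf("%d objects, %d bytes\n", *stats.Count, *stats.Size)
	}
	return nil
}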
+func (s *Bucket) HeadRequest() (*request.Request, *HeadBucketOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "HEAD Bucket", + RequestMethod: "HEAD", + RequestURI: "/", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &HeadBucketOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// HeadBucketOutput presents output for HeadBucket. +type HeadBucketOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// ListMultipartUploads does List multipart uploads in the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/list_multipart_uploads.html +func (s *Bucket) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + r, x, err := s.ListMultipartUploadsRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// ListMultipartUploadsRequest creates request and output object of ListMultipartUploads. +func (s *Bucket) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (*request.Request, *ListMultipartUploadsOutput, error) { + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "List Multipart Uploads", + RequestMethod: "GET", + RequestURI: "/?uploads", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &ListMultipartUploadsOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// ListMultipartUploadsInput presents input for ListMultipartUploads. +type ListMultipartUploadsInput struct { + // Put all keys that share a common prefix into a list + Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"params"` + // Results count limit + Limit *int `json:"limit,omitempty" name:"limit" location:"params"` + // Limit results to keys that start at this marker + Marker *string `json:"marker,omitempty" name:"marker" location:"params"` + // Limits results to keys that begin with the prefix + Prefix *string `json:"prefix,omitempty" name:"prefix" location:"params"` +} + +// Validate validates the input for ListMultipartUploads. +func (v *ListMultipartUploadsInput) Validate() error { + + return nil +} + +// ListMultipartUploadsOutput presents output for ListMultipartUploads. 
+type ListMultipartUploadsOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Other object keys that share common prefixes + CommonPrefixes []*string `json:"common_prefixes,omitempty" name:"common_prefixes" location:"elements"` + // Delimiter that specified in request parameters + Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"elements"` + // Limit that specified in request parameters + Limit *int `json:"limit,omitempty" name:"limit" location:"elements"` + // Marker that specified in request parameters + Marker *string `json:"marker,omitempty" name:"marker" location:"elements"` + // Bucket name + Name *string `json:"name,omitempty" name:"name" location:"elements"` + // The last key in keys list + NextMarker *string `json:"next_marker,omitempty" name:"next_marker" location:"elements"` + // Prefix that specified in request parameters + Prefix *string `json:"prefix,omitempty" name:"prefix" location:"elements"` + // Multipart uploads + Uploads []*UploadsType `json:"uploads,omitempty" name:"uploads" location:"elements"` +} + +// ListObjects does Retrieve the object list in a bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/get.html +func (s *Bucket) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + r, x, err := s.ListObjectsRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// ListObjectsRequest creates request and output object of ListObjects. +func (s *Bucket) ListObjectsRequest(input *ListObjectsInput) (*request.Request, *ListObjectsOutput, error) { + + if input == nil { + input = &ListObjectsInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Bucket (List Objects)", + RequestMethod: "GET", + RequestURI: "/", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &ListObjectsOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// ListObjectsInput presents input for ListObjects. +type ListObjectsInput struct { + // Put all keys that share a common prefix into a list + Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"params"` + // Results count limit + Limit *int `json:"limit,omitempty" name:"limit" location:"params"` + // Limit results to keys that start at this marker + Marker *string `json:"marker,omitempty" name:"marker" location:"params"` + // Limits results to keys that begin with the prefix + Prefix *string `json:"prefix,omitempty" name:"prefix" location:"params"` +} + +// Validate validates the input for ListObjects. +func (v *ListObjectsInput) Validate() error { + + return nil +} + +// ListObjectsOutput presents output for ListObjects. 
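ListObjects and ListMultipartUploads both page with Limit/Marker and report NextMarker. The sketch below (not part of the vendored file; the page size of 200 is an arbitrary placeholder, and the *service.Bucket is assumed initialized) pages through a prefix and feeds each page of Keys straight into DeleteMultipleObjects, so no fields of KeyType (declared in service/types.go, outside this hunk) have to be spelled out.

package example

import (
	qs "github.com/yunify/qingstor-sdk-go/service"
)

// deleteByPrefix walks a key prefix page by page and bulk-deletes each page.
// ListObjectsOutput.Keys and DeleteMultipleObjectsInput.Objects are both
// []*KeyType, so the slice is passed through unchanged.
func deleteByPrefix(bucket *qs.Bucket, prefix string) error {
	marker := ""
	for {
		page, err := bucket.ListObjects(&qs.ListObjectsInput{
			Prefix: qs.String(prefix),
			Limit:  qs.Int(200),
			Marker: qs.String(marker),
		})
		if err != nil {
			return err
		}
		if len(page.Keys) > 0 {
			if _, err := bucket.DeleteMultipleObjects(&qs.DeleteMultipleObjectsInput{
				Objects: page.Keys,
				Quiet:   qs.Bool(true),
			}); err != nil {
				return err
			}
		}
		next := qs.StringValue(page.NextMarker)
		if next == "" {
			return nil
		}
		marker = next
	}
}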
+type ListObjectsOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Other object keys that share common prefixes + CommonPrefixes []*string `json:"common_prefixes,omitempty" name:"common_prefixes" location:"elements"` + // Delimiter that specified in request parameters + Delimiter *string `json:"delimiter,omitempty" name:"delimiter" location:"elements"` + // Object keys + Keys []*KeyType `json:"keys,omitempty" name:"keys" location:"elements"` + // Limit that specified in request parameters + Limit *int `json:"limit,omitempty" name:"limit" location:"elements"` + // Marker that specified in request parameters + Marker *string `json:"marker,omitempty" name:"marker" location:"elements"` + // Bucket name + Name *string `json:"name,omitempty" name:"name" location:"elements"` + // The last key in keys list + NextMarker *string `json:"next_marker,omitempty" name:"next_marker" location:"elements"` + // Bucket owner + Owner *OwnerType `json:"owner,omitempty" name:"owner" location:"elements"` + // Prefix that specified in request parameters + Prefix *string `json:"prefix,omitempty" name:"prefix" location:"elements"` +} + +// Put does Create a new bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/put.html +func (s *Bucket) Put() (*PutBucketOutput, error) { + r, x, err := s.PutRequest() + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutRequest creates request and output object of PutBucket. +func (s *Bucket) PutRequest() (*request.Request, *PutBucketOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Bucket", + RequestMethod: "PUT", + RequestURI: "/", + StatusCodes: []int{ + 201, // Bucket created + }, + } + + x := &PutBucketOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutBucketOutput presents output for PutBucket. +type PutBucketOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// PutACL does Set ACL information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/put_acl.html +func (s *Bucket) PutACL(input *PutBucketACLInput) (*PutBucketACLOutput, error) { + r, x, err := s.PutACLRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutACLRequest creates request and output object of PutBucketACL. +func (s *Bucket) PutACLRequest(input *PutBucketACLInput) (*request.Request, *PutBucketACLOutput, error) { + + if input == nil { + input = &PutBucketACLInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Bucket ACL", + RequestMethod: "PUT", + RequestURI: "/?acl", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &PutBucketACLOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutBucketACLInput presents input for PutBucketACL. +type PutBucketACLInput struct { + // Bucket ACL rules + ACL []*ACLType `json:"acl" name:"acl" location:"elements"` // Required + +} + +// Validate validates the input for PutBucketACL. 
+func (v *PutBucketACLInput) Validate() error { + + if len(v.ACL) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "ACL", + ParentName: "PutBucketACLInput", + } + } + + if len(v.ACL) > 0 { + for _, property := range v.ACL { + if err := property.Validate(); err != nil { + return err + } + } + } + + return nil +} + +// PutBucketACLOutput presents output for PutBucketACL. +type PutBucketACLOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// PutCORS does Set CORS information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/cors/put_cors.html +func (s *Bucket) PutCORS(input *PutBucketCORSInput) (*PutBucketCORSOutput, error) { + r, x, err := s.PutCORSRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutCORSRequest creates request and output object of PutBucketCORS. +func (s *Bucket) PutCORSRequest(input *PutBucketCORSInput) (*request.Request, *PutBucketCORSOutput, error) { + + if input == nil { + input = &PutBucketCORSInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Bucket CORS", + RequestMethod: "PUT", + RequestURI: "/?cors", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &PutBucketCORSOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutBucketCORSInput presents input for PutBucketCORS. +type PutBucketCORSInput struct { + // Bucket CORS rules + CORSRules []*CORSRuleType `json:"cors_rules" name:"cors_rules" location:"elements"` // Required + +} + +// Validate validates the input for PutBucketCORS. +func (v *PutBucketCORSInput) Validate() error { + + if len(v.CORSRules) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "CORSRules", + ParentName: "PutBucketCORSInput", + } + } + + if len(v.CORSRules) > 0 { + for _, property := range v.CORSRules { + if err := property.Validate(); err != nil { + return err + } + } + } + + return nil +} + +// PutBucketCORSOutput presents output for PutBucketCORS. +type PutBucketCORSOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// PutExternalMirror does Set external mirror of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/external_mirror/put_external_mirror.html +func (s *Bucket) PutExternalMirror(input *PutBucketExternalMirrorInput) (*PutBucketExternalMirrorOutput, error) { + r, x, err := s.PutExternalMirrorRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutExternalMirrorRequest creates request and output object of PutBucketExternalMirror. 
+func (s *Bucket) PutExternalMirrorRequest(input *PutBucketExternalMirrorInput) (*request.Request, *PutBucketExternalMirrorOutput, error) { + + if input == nil { + input = &PutBucketExternalMirrorInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Bucket External Mirror", + RequestMethod: "PUT", + RequestURI: "/?mirror", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &PutBucketExternalMirrorOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutBucketExternalMirrorInput presents input for PutBucketExternalMirror. +type PutBucketExternalMirrorInput struct { + // Source site url + SourceSite *string `json:"source_site" name:"source_site" location:"elements"` // Required + +} + +// Validate validates the input for PutBucketExternalMirror. +func (v *PutBucketExternalMirrorInput) Validate() error { + + if v.SourceSite == nil { + return errors.ParameterRequiredError{ + ParameterName: "SourceSite", + ParentName: "PutBucketExternalMirrorInput", + } + } + + return nil +} + +// PutBucketExternalMirrorOutput presents output for PutBucketExternalMirror. +type PutBucketExternalMirrorOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// PutPolicy does Set policy information of the bucket. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/policy/put_policy.html +func (s *Bucket) PutPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + r, x, err := s.PutPolicyRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutPolicyRequest creates request and output object of PutBucketPolicy. +func (s *Bucket) PutPolicyRequest(input *PutBucketPolicyInput) (*request.Request, *PutBucketPolicyOutput, error) { + + if input == nil { + input = &PutBucketPolicyInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Bucket Policy", + RequestMethod: "PUT", + RequestURI: "/?policy", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &PutBucketPolicyOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutBucketPolicyInput presents input for PutBucketPolicy. +type PutBucketPolicyInput struct { + // Bucket policy statement + Statement []*StatementType `json:"statement" name:"statement" location:"elements"` // Required + +} + +// Validate validates the input for PutBucketPolicy. +func (v *PutBucketPolicyInput) Validate() error { + + if len(v.Statement) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "Statement", + ParentName: "PutBucketPolicyInput", + } + } + + if len(v.Statement) > 0 { + for _, property := range v.Statement { + if err := property.Validate(); err != nil { + return err + } + } + } + + return nil +} + +// PutBucketPolicyOutput presents output for PutBucketPolicy. 
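The PUT-style bucket calls each wrap a single required field (ACL rules, CORS rules, mirror source, policy statements), and the generated Validate methods reject a missing required field with errors.ParameterRequiredError. A sketch for the simplest of them, PutExternalMirror, again assuming an initialized *service.Bucket; it is not part of the vendored file.

package example

import (
	"log"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

// configureMirror points the bucket at an external source site. SourceSite is
// the only input field; leaving it nil makes the generated Validate method
// return errors.ParameterRequiredError instead of a usable request.
func configureMirror(bucket *qs.Bucket, sourceSite string) error {
	out, err := bucket.PutExternalMirror(&qs.PutBucketExternalMirrorInput{
		SourceSite: qs.String(sourceSite),
	})
	if err != nil {
		return err
	}
	log.Printf("mirror set, request id %s", qs.StringValue(out.RequestID))
	return nil
}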
+type PutBucketPolicyOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types.go b/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types.go new file mode 100644 index 000000000..10e52c061 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types.go @@ -0,0 +1,266 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package service + +import ( + "time" +) + +// String returns a pointer to the given string value. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the given string pointer or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the given bool value. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the given bool pointer or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the given int value. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the given int pointer or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the given time.Time value. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the given time.Time pointer or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
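convert_types.go provides the usual pointer helpers: X(v) takes the address of a value and XValue(p) dereferences it with a zero-value fallback for nil, which is what makes the pointer-heavy input/output structs above convenient to consume. A small self-contained sketch of the round trips (not part of the vendored file):

package example

import (
	"fmt"
	"time"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

// pointerHelpers demonstrates the value <-> pointer round trips.
func pointerHelpers() {
	limit := qs.Int(20)
	fmt.Println(qs.IntValue(limit)) // 20
	fmt.Println(qs.IntValue(nil))   // 0: nil pointers yield the zero value

	var name *string
	fmt.Println(qs.StringValue(name) == "") // true

	fmt.Println(qs.BoolValue(qs.Bool(true))) // true

	created := qs.Time(time.Now())
	fmt.Println(qs.TimeValue(created).IsZero()) // false

	fmt.Println(qs.TimeUnixMilli(time.Unix(3, 0))) // 3000
}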
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types_test.go b/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types_test.go new file mode 100644 index 000000000..5660c9e40 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/convert_types_test.go @@ -0,0 +1,311 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package service + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var testCasesStringSlice = [][]string{ + {"a", "b", "c", "d", "e"}, + {"a", "b", "", "", "e"}, +} + +func TestStringSlice(t *testing.T) { + for idx, in := range testCasesStringSlice { + if in == nil { + continue + } + out := StringSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesStringValueSlice = [][]*string{ + {String("a"), String("b"), nil, String("c")}, +} + +func TestStringValueSlice(t *testing.T) { + for idx, in := range testCasesStringValueSlice { + if in == nil { + continue + } + out := StringValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := StringSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesStringMap = []map[string]string{ + {"a": "1", "b": "2", "c": "3"}, +} + +func TestStringMap(t *testing.T) { + for idx, in := range testCasesStringMap { + if in == nil { + continue + } + out := StringMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := StringValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolSlice = [][]bool{ + {true, true, false, false}, +} + +func TestBoolSlice(t *testing.T) { + for idx, in := range testCasesBoolSlice { + if in == nil { + continue + } + out := BoolSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesBoolValueSlice = [][]*bool{} + +func TestBoolValueSlice(t *testing.T) { + for idx, in := range testCasesBoolValueSlice { + if in == nil { + continue + } + out := BoolValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := BoolSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesBoolMap = []map[string]bool{ + {"a": true, "b": false, "c": true}, +} + +func TestBoolMap(t *testing.T) { + for idx, in := range testCasesBoolMap 
{ + if in == nil { + continue + } + out := BoolMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := BoolValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntSlice = [][]int{ + {1, 2, 3, 4}, +} + +func TestIntSlice(t *testing.T) { + for idx, in := range testCasesIntSlice { + if in == nil { + continue + } + out := IntSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesIntValueSlice = [][]*int{} + +func TestIntValueSlice(t *testing.T) { + for idx, in := range testCasesIntValueSlice { + if in == nil { + continue + } + out := IntValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := IntSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesIntMap = []map[string]int{ + {"a": 3, "b": 2, "c": 1}, +} + +func TestIntMap(t *testing.T) { + for idx, in := range testCasesIntMap { + if in == nil { + continue + } + out := IntMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := IntValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeSlice = [][]time.Time{ + {time.Now(), time.Now().AddDate(100, 0, 0)}, +} + +func TestTimeSlice(t *testing.T) { + for idx, in := range testCasesTimeSlice { + if in == nil { + continue + } + out := TimeSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} + +var testCasesTimeValueSlice = [][]*time.Time{} + +func TestTimeValueSlice(t *testing.T) { + for idx, in := range testCasesTimeValueSlice { + if in == nil { + continue + } + out := TimeValueSlice(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + if in[i] == nil { + assert.Empty(t, out[i], "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) + } + } + + out2 := TimeSlice(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + for i := range out2 { + if in[i] == nil { + assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) + } else { + assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) + } + } + } +} + +var testCasesTimeMap = []map[string]time.Time{ + {"a": 
time.Now().AddDate(-100, 0, 0), "b": time.Now()}, +} + +func TestTimeMap(t *testing.T) { + for idx, in := range testCasesTimeMap { + if in == nil { + continue + } + out := TimeMap(in) + assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) + for i := range out { + assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) + } + + out2 := TimeValueMap(out) + assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) + assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) + } +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/object.go b/vendor/github.com/yunify/qingstor-sdk-go/service/object.go new file mode 100644 index 000000000..6cfae7791 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/object.go @@ -0,0 +1,926 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package service + +import ( + "fmt" + "io" + "time" + + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +var _ fmt.State +var _ io.Reader +var _ time.Time +var _ config.Config + +// AbortMultipartUpload does Abort multipart upload. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/abort_multipart_upload.html +func (s *Bucket) AbortMultipartUpload(objectKey string, input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + r, x, err := s.AbortMultipartUploadRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// AbortMultipartUploadRequest creates request and output object of AbortMultipartUpload. +func (s *Bucket) AbortMultipartUploadRequest(objectKey string, input *AbortMultipartUploadInput) (*request.Request, *AbortMultipartUploadOutput, error) { + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "Abort Multipart Upload", + RequestMethod: "DELETE", + RequestURI: "//", + StatusCodes: []int{ + 204, // Object multipart deleted + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &AbortMultipartUploadOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// AbortMultipartUploadInput presents input for AbortMultipartUpload. 
+type AbortMultipartUploadInput struct { + // Object multipart upload ID + UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required + +} + +// Validate validates the input for AbortMultipartUpload. +func (v *AbortMultipartUploadInput) Validate() error { + + if v.UploadID == nil { + return errors.ParameterRequiredError{ + ParameterName: "UploadID", + ParentName: "AbortMultipartUploadInput", + } + } + + return nil +} + +// AbortMultipartUploadOutput presents output for AbortMultipartUpload. +type AbortMultipartUploadOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// CompleteMultipartUpload does Complete multipart upload. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/complete_multipart_upload.html +func (s *Bucket) CompleteMultipartUpload(objectKey string, input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + r, x, err := s.CompleteMultipartUploadRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// CompleteMultipartUploadRequest creates request and output object of CompleteMultipartUpload. +func (s *Bucket) CompleteMultipartUploadRequest(objectKey string, input *CompleteMultipartUploadInput) (*request.Request, *CompleteMultipartUploadOutput, error) { + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "Complete multipart upload", + RequestMethod: "POST", + RequestURI: "//", + StatusCodes: []int{ + 201, // Object created + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &CompleteMultipartUploadOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// CompleteMultipartUploadInput presents input for CompleteMultipartUpload. +type CompleteMultipartUploadInput struct { + // Object multipart upload ID + UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required + + // MD5sum of the object part + ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` + + // Object parts + ObjectParts []*ObjectPartType `json:"object_parts,omitempty" name:"object_parts" location:"elements"` +} + +// Validate validates the input for CompleteMultipartUpload. +func (v *CompleteMultipartUploadInput) Validate() error { + + if v.UploadID == nil { + return errors.ParameterRequiredError{ + ParameterName: "UploadID", + ParentName: "CompleteMultipartUploadInput", + } + } + + if len(v.ObjectParts) > 0 { + for _, property := range v.ObjectParts { + if err := property.Validate(); err != nil { + return err + } + } + } + + return nil +} + +// CompleteMultipartUploadOutput presents output for CompleteMultipartUpload. 
+type CompleteMultipartUploadOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// DeleteObject does Delete the object. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/delete.html +func (s *Bucket) DeleteObject(objectKey string) (*DeleteObjectOutput, error) { + r, x, err := s.DeleteObjectRequest(objectKey) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// DeleteObjectRequest creates request and output object of DeleteObject. +func (s *Bucket) DeleteObjectRequest(objectKey string) (*request.Request, *DeleteObjectOutput, error) { + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "DELETE Object", + RequestMethod: "DELETE", + RequestURI: "//", + StatusCodes: []int{ + 204, // Object deleted + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &DeleteObjectOutput{} + r, err := request.New(o, nil, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// DeleteObjectOutput presents output for DeleteObject. +type DeleteObjectOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// GetObject does Retrieve the object. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/get.html +func (s *Bucket) GetObject(objectKey string, input *GetObjectInput) (*GetObjectOutput, error) { + r, x, err := s.GetObjectRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// GetObjectRequest creates request and output object of GetObject. +func (s *Bucket) GetObjectRequest(objectKey string, input *GetObjectInput) (*request.Request, *GetObjectOutput, error) { + + if input == nil { + input = &GetObjectInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "GET Object", + RequestMethod: "GET", + RequestURI: "//", + StatusCodes: []int{ + 200, // OK + 206, // Partial content + 304, // Not modified + 412, // Precondition failed + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &GetObjectOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// GetObjectInput presents input for GetObject. 
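GetObjectOutput carries the response body as an io.ReadCloser, so the caller owns closing it. A download sketch, not part of the vendored file, assuming an initialized *service.Bucket and a writable local path (both placeholders):

package example

import (
	"io"
	"os"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

// downloadObject fetches an object and streams the response body to a local
// file. The commented Range header turns this into a partial read (206).
func downloadObject(bucket *qs.Bucket, key, localPath string) error {
	out, err := bucket.GetObject(key, &qs.GetObjectInput{
		// Range: qs.String("bytes=0-1023"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	f, err := os.Create(localPath)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, out.Body)
	return err
}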
+type GetObjectInput struct { + // Specified the Cache-Control response header + ResponseCacheControl *string `json:"response-cache-control,omitempty" name:"response-cache-control" location:"params"` + // Specified the Content-Disposition response header + ResponseContentDisposition *string `json:"response-content-disposition,omitempty" name:"response-content-disposition" location:"params"` + // Specified the Content-Encoding response header + ResponseContentEncoding *string `json:"response-content-encoding,omitempty" name:"response-content-encoding" location:"params"` + // Specified the Content-Language response header + ResponseContentLanguage *string `json:"response-content-language,omitempty" name:"response-content-language" location:"params"` + // Specified the Content-Type response header + ResponseContentType *string `json:"response-content-type,omitempty" name:"response-content-type" location:"params"` + // Specified the Expires response header + ResponseExpires *string `json:"response-expires,omitempty" name:"response-expires" location:"params"` + + // Check whether the ETag matches + IfMatch *string `json:"If-Match,omitempty" name:"If-Match" location:"headers"` + // Check whether the object has been modified + IfModifiedSince *time.Time `json:"If-Modified-Since,omitempty" name:"If-Modified-Since" format:"RFC 822" location:"headers"` + // Check whether the ETag does not match + IfNoneMatch *string `json:"If-None-Match,omitempty" name:"If-None-Match" location:"headers"` + // Check whether the object has not been modified + IfUnmodifiedSince *time.Time `json:"If-Unmodified-Since,omitempty" name:"If-Unmodified-Since" format:"RFC 822" location:"headers"` + // Specified range of the object + Range *string `json:"Range,omitempty" name:"Range" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` +} + +// Validate validates the input for GetObject. +func (v *GetObjectInput) Validate() error { + + return nil +} + +// GetObjectOutput presents output for GetObject. +type GetObjectOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // The response body + Body io.ReadCloser `location:"body"` + + // Object content length + ContentLength *int64 `json:"Content-Length,omitempty" name:"Content-Length" location:"headers"` + // Range of response data content + ContentRange *string `json:"Content-Range,omitempty" name:"Content-Range" location:"headers"` + // MD5sum of the object + ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` +} + +// HeadObject does Check whether the object exists and available. 
+// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/head.html +func (s *Bucket) HeadObject(objectKey string, input *HeadObjectInput) (*HeadObjectOutput, error) { + r, x, err := s.HeadObjectRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// HeadObjectRequest creates request and output object of HeadObject. +func (s *Bucket) HeadObjectRequest(objectKey string, input *HeadObjectInput) (*request.Request, *HeadObjectOutput, error) { + + if input == nil { + input = &HeadObjectInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "HEAD Object", + RequestMethod: "HEAD", + RequestURI: "//", + StatusCodes: []int{ + 200, // OK + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &HeadObjectOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// HeadObjectInput presents input for HeadObject. +type HeadObjectInput struct { + // Check whether the ETag matches + IfMatch *string `json:"If-Match,omitempty" name:"If-Match" location:"headers"` + // Check whether the object has been modified + IfModifiedSince *time.Time `json:"If-Modified-Since,omitempty" name:"If-Modified-Since" format:"RFC 822" location:"headers"` + // Check whether the ETag does not match + IfNoneMatch *string `json:"If-None-Match,omitempty" name:"If-None-Match" location:"headers"` + // Check whether the object has not been modified + IfUnmodifiedSince *time.Time `json:"If-Unmodified-Since,omitempty" name:"If-Unmodified-Since" format:"RFC 822" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` +} + +// Validate validates the input for HeadObject. +func (v *HeadObjectInput) Validate() error { + + return nil +} + +// HeadObjectOutput presents output for HeadObject. +type HeadObjectOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Object content length + ContentLength *int64 `json:"Content-Length,omitempty" name:"Content-Length" location:"headers"` + // Object content type + ContentType *string `json:"Content-Type,omitempty" name:"Content-Type" location:"headers"` + // MD5sum of the object + ETag *string `json:"ETag,omitempty" name:"ETag" location:"headers"` + LastModified *time.Time `json:"Last-Modified,omitempty" name:"Last-Modified" format:"RFC 822" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` +} + +// InitiateMultipartUpload does Initial multipart upload on the object. 
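HeadObject is the cheap way to confirm an object exists and to read its metadata without transferring the body. A sketch under the same assumptions as the earlier ones (initialized *service.Bucket, placeholder key); not part of the vendored file.

package example

import (
	"fmt"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

// statObject issues a HEAD request and reports size, content type and ETag.
// A non-nil error typically means the object is missing or inaccessible; the
// concrete error type comes from the request/errors package, not this hunk.
func statObject(bucket *qs.Bucket, key string) error {
	out, err := bucket.HeadObject(key, &qs.HeadObjectInput{})
	if err != nil {
		return err
	}
	size := int64(0)
	if out.ContentLength != nil {
		size = *out.ContentLength
	}
	fmt.Printf("%s: %d bytes, type %s, etag %s\n",
		key, size, qs.StringValue(out.ContentType), qs.StringValue(out.ETag))
	return nil
}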
+// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/initiate_multipart_upload.html +func (s *Bucket) InitiateMultipartUpload(objectKey string, input *InitiateMultipartUploadInput) (*InitiateMultipartUploadOutput, error) { + r, x, err := s.InitiateMultipartUploadRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// InitiateMultipartUploadRequest creates request and output object of InitiateMultipartUpload. +func (s *Bucket) InitiateMultipartUploadRequest(objectKey string, input *InitiateMultipartUploadInput) (*request.Request, *InitiateMultipartUploadOutput, error) { + + if input == nil { + input = &InitiateMultipartUploadInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "Initiate Multipart Upload", + RequestMethod: "POST", + RequestURI: "//?uploads", + StatusCodes: []int{ + 200, // OK + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &InitiateMultipartUploadOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// InitiateMultipartUploadInput presents input for InitiateMultipartUpload. +type InitiateMultipartUploadInput struct { + // Object content type + ContentType *string `json:"Content-Type,omitempty" name:"Content-Type" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` +} + +// Validate validates the input for InitiateMultipartUpload. +func (v *InitiateMultipartUploadInput) Validate() error { + + return nil +} + +// InitiateMultipartUploadOutput presents output for InitiateMultipartUpload. +type InitiateMultipartUploadOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Bucket name + Bucket *string `json:"bucket,omitempty" name:"bucket" location:"elements"` + // Object key + Key *string `json:"key,omitempty" name:"key" location:"elements"` + // Object multipart upload ID + UploadID *string `json:"upload_id,omitempty" name:"upload_id" location:"elements"` + + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` +} + +// ListMultipart does List object parts. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/list_multipart.html +func (s *Bucket) ListMultipart(objectKey string, input *ListMultipartInput) (*ListMultipartOutput, error) { + r, x, err := s.ListMultipartRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// ListMultipartRequest creates request and output object of ListMultipart. 
+func (s *Bucket) ListMultipartRequest(objectKey string, input *ListMultipartInput) (*request.Request, *ListMultipartOutput, error) { + + if input == nil { + input = &ListMultipartInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "List Multipart", + RequestMethod: "GET", + RequestURI: "//", + StatusCodes: []int{ + 200, // OK + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &ListMultipartOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// ListMultipartInput presents input for ListMultipart. +type ListMultipartInput struct { + // Limit results count + Limit *int `json:"limit,omitempty" name:"limit" location:"params"` + // Object multipart upload part number + PartNumberMarker *int `json:"part_number_marker,omitempty" name:"part_number_marker" location:"params"` + // Object multipart upload ID + UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required + +} + +// Validate validates the input for ListMultipart. +func (v *ListMultipartInput) Validate() error { + + if v.UploadID == nil { + return errors.ParameterRequiredError{ + ParameterName: "UploadID", + ParentName: "ListMultipartInput", + } + } + + return nil +} + +// ListMultipartOutput presents output for ListMultipart. +type ListMultipartOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Object multipart count + Count *int `json:"count,omitempty" name:"count" location:"elements"` + // Object parts + ObjectParts []*ObjectPartType `json:"object_parts,omitempty" name:"object_parts" location:"elements"` +} + +// OptionsObject does Check whether the object accepts a origin with method and header. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/options.html +func (s *Bucket) OptionsObject(objectKey string, input *OptionsObjectInput) (*OptionsObjectOutput, error) { + r, x, err := s.OptionsObjectRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// OptionsObjectRequest creates request and output object of OptionsObject. +func (s *Bucket) OptionsObjectRequest(objectKey string, input *OptionsObjectInput) (*request.Request, *OptionsObjectOutput, error) { + + if input == nil { + input = &OptionsObjectInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "OPTIONS Object", + RequestMethod: "OPTIONS", + RequestURI: "//", + StatusCodes: []int{ + 200, // OK + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &OptionsObjectOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// OptionsObjectInput presents input for OptionsObject. +type OptionsObjectInput struct { + // Request headers + AccessControlRequestHeaders *string `json:"Access-Control-Request-Headers,omitempty" name:"Access-Control-Request-Headers" location:"headers"` + // Request method + AccessControlRequestMethod *string `json:"Access-Control-Request-Method" name:"Access-Control-Request-Method" location:"headers"` // Required + // Request origin + Origin *string `json:"Origin" name:"Origin" location:"headers"` // Required + +} + +// Validate validates the input for OptionsObject. 
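The multipart calls fit together as InitiateMultipartUpload, then per-part uploads, then ListMultipart plus CompleteMultipartUpload, with AbortMultipartUpload as the cleanup path. The part-upload call itself is not in this hunk, so the sketch below (not part of the vendored file) assumes the parts were already sent and only shows the finishing step; ListMultipart and CompleteMultipartUploadInput both use []*ObjectPartType, so the listing can be passed through unchanged.

package example

import (
	qs "github.com/yunify/qingstor-sdk-go/service"
)

// finishMultipartUpload lists the uploaded parts and completes the upload,
// aborting it if completion fails so the parts do not linger.
func finishMultipartUpload(bucket *qs.Bucket, key, uploadID string) error {
	parts, err := bucket.ListMultipart(key, &qs.ListMultipartInput{
		UploadID: qs.String(uploadID),
	})
	if err != nil {
		return err
	}

	_, err = bucket.CompleteMultipartUpload(key, &qs.CompleteMultipartUploadInput{
		UploadID:    qs.String(uploadID),
		ObjectParts: parts.ObjectParts,
	})
	if err != nil {
		// Best-effort cleanup of the stale upload.
		if _, abortErr := bucket.AbortMultipartUpload(key, &qs.AbortMultipartUploadInput{
			UploadID: qs.String(uploadID),
		}); abortErr != nil {
			return abortErr
		}
		return err
	}
	return nil
}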
+func (v *OptionsObjectInput) Validate() error { + + if v.AccessControlRequestMethod == nil { + return errors.ParameterRequiredError{ + ParameterName: "AccessControlRequestMethod", + ParentName: "OptionsObjectInput", + } + } + + if v.Origin == nil { + return errors.ParameterRequiredError{ + ParameterName: "Origin", + ParentName: "OptionsObjectInput", + } + } + + return nil +} + +// OptionsObjectOutput presents output for OptionsObject. +type OptionsObjectOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Allowed headers + AccessControlAllowHeaders *string `json:"Access-Control-Allow-Headers,omitempty" name:"Access-Control-Allow-Headers" location:"headers"` + // Allowed methods + AccessControlAllowMethods *string `json:"Access-Control-Allow-Methods,omitempty" name:"Access-Control-Allow-Methods" location:"headers"` + // Allowed origin + AccessControlAllowOrigin *string `json:"Access-Control-Allow-Origin,omitempty" name:"Access-Control-Allow-Origin" location:"headers"` + // Expose headers + AccessControlExposeHeaders *string `json:"Access-Control-Expose-Headers,omitempty" name:"Access-Control-Expose-Headers" location:"headers"` + // Max age + AccessControlMaxAge *string `json:"Access-Control-Max-Age,omitempty" name:"Access-Control-Max-Age" location:"headers"` +} + +// PutObject does Upload the object. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/put.html +func (s *Bucket) PutObject(objectKey string, input *PutObjectInput) (*PutObjectOutput, error) { + r, x, err := s.PutObjectRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// PutObjectRequest creates request and output object of PutObject. +func (s *Bucket) PutObjectRequest(objectKey string, input *PutObjectInput) (*request.Request, *PutObjectOutput, error) { + + if input == nil { + input = &PutObjectInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "PUT Object", + RequestMethod: "PUT", + RequestURI: "//", + StatusCodes: []int{ + 201, // Object created + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &PutObjectOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// PutObjectInput presents input for PutObject. 
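+//
+// Illustrative caller-side sketch (editorial example, not generated SDK code);
+// it uploads a small in-memory payload and assumes the String/Int64/IntValue
+// pointer helpers from convert_types.go:
+//
+//    body := []byte("hello qingstor")
+//    out, err := bucket.PutObject("hello.txt", &PutObjectInput{
+//        ContentType:   String("text/plain"),
+//        ContentLength: Int64(int64(len(body))),
+//        Body:          bytes.NewReader(body),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(IntValue(out.StatusCode)) // 201 is the expected status code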
+type PutObjectInput struct { + // Object content size + ContentLength *int64 `json:"Content-Length" name:"Content-Length" location:"headers"` // Required + // Object MD5sum + ContentMD5 *string `json:"Content-MD5,omitempty" name:"Content-MD5" location:"headers"` + // Object content type + ContentType *string `json:"Content-Type,omitempty" name:"Content-Type" location:"headers"` + // Used to indicate that particular server behaviors are required by the client + Expect *string `json:"Expect,omitempty" name:"Expect" location:"headers"` + // Copy source, format (//) + XQSCopySource *string `json:"X-QS-Copy-Source,omitempty" name:"X-QS-Copy-Source" location:"headers"` + // Encryption algorithm of the object + XQSCopySourceEncryptionCustomerAlgorithm *string `json:"X-QS-Copy-Source-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Copy-Source-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSCopySourceEncryptionCustomerKey *string `json:"X-QS-Copy-Source-Encryption-Customer-Key,omitempty" name:"X-QS-Copy-Source-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSCopySourceEncryptionCustomerKeyMD5 *string `json:"X-QS-Copy-Source-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Copy-Source-Encryption-Customer-Key-MD5" location:"headers"` + // Check whether the copy source matches + XQSCopySourceIfMatch *string `json:"X-QS-Copy-Source-If-Match,omitempty" name:"X-QS-Copy-Source-If-Match" location:"headers"` + // Check whether the copy source has been modified + XQSCopySourceIfModifiedSince *time.Time `json:"X-QS-Copy-Source-If-Modified-Since,omitempty" name:"X-QS-Copy-Source-If-Modified-Since" format:"RFC 822" location:"headers"` + // Check whether the copy source does not match + XQSCopySourceIfNoneMatch *string `json:"X-QS-Copy-Source-If-None-Match,omitempty" name:"X-QS-Copy-Source-If-None-Match" location:"headers"` + // Check whether the copy source has not been modified + XQSCopySourceIfUnmodifiedSince *time.Time `json:"X-QS-Copy-Source-If-Unmodified-Since,omitempty" name:"X-QS-Copy-Source-If-Unmodified-Since" format:"RFC 822" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` + // Check whether fetch target object has not been modified + XQSFetchIfUnmodifiedSince *time.Time `json:"X-QS-Fetch-If-Unmodified-Since,omitempty" name:"X-QS-Fetch-If-Unmodified-Since" format:"RFC 822" location:"headers"` + // Fetch source, should be a valid url + XQSFetchSource *string `json:"X-QS-Fetch-Source,omitempty" name:"X-QS-Fetch-Source" location:"headers"` + // Move source, format (//) + XQSMoveSource *string `json:"X-QS-Move-Source,omitempty" name:"X-QS-Move-Source" location:"headers"` + + // The request body + Body io.Reader `location:"body"` +} + +// Validate validates the input for PutObject. +func (v *PutObjectInput) Validate() error { + + return nil +} + +// PutObjectOutput presents output for PutObject. 
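+//
+// Illustrative caller-side sketch (editorial example, not generated SDK code)
+// of a server-side copy driven by the X-QS-Copy-Source header field above;
+// the "/<source-bucket>/<source-key>" form of the header value is an
+// assumption based on the QingStor documentation, not on this file:
+//
+//    out, err := bucket.PutObject("copy-of-hello.txt", &PutObjectInput{
+//        XQSCopySource: String("/source-bucket/hello.txt"),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(IntValue(out.StatusCode))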
+type PutObjectOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} + +// UploadMultipart does Upload object multipart. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/object/multipart/upload_multipart.html +func (s *Bucket) UploadMultipart(objectKey string, input *UploadMultipartInput) (*UploadMultipartOutput, error) { + r, x, err := s.UploadMultipartRequest(objectKey, input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// UploadMultipartRequest creates request and output object of UploadMultipart. +func (s *Bucket) UploadMultipartRequest(objectKey string, input *UploadMultipartInput) (*request.Request, *UploadMultipartOutput, error) { + + if input == nil { + input = &UploadMultipartInput{} + } + + o := &data.Operation{ + Config: s.Config, + Properties: s.Properties, + APIName: "Upload Multipart", + RequestMethod: "PUT", + RequestURI: "//", + StatusCodes: []int{ + 201, // Object multipart created + }, + } + + s.Properties.ObjectKey = &objectKey + + x := &UploadMultipartOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// UploadMultipartInput presents input for UploadMultipart. +type UploadMultipartInput struct { + // Object multipart upload part number + PartNumber *int `json:"part_number" name:"part_number" default:"0" location:"params"` // Required + // Object multipart upload ID + UploadID *string `json:"upload_id" name:"upload_id" location:"params"` // Required + + // Object multipart content length + ContentLength *int64 `json:"Content-Length,omitempty" name:"Content-Length" location:"headers"` + // Object multipart content MD5sum + ContentMD5 *string `json:"Content-MD5,omitempty" name:"Content-MD5" location:"headers"` + // Encryption algorithm of the object + XQSEncryptionCustomerAlgorithm *string `json:"X-QS-Encryption-Customer-Algorithm,omitempty" name:"X-QS-Encryption-Customer-Algorithm" location:"headers"` + // Encryption key of the object + XQSEncryptionCustomerKey *string `json:"X-QS-Encryption-Customer-Key,omitempty" name:"X-QS-Encryption-Customer-Key" location:"headers"` + // MD5sum of encryption key + XQSEncryptionCustomerKeyMD5 *string `json:"X-QS-Encryption-Customer-Key-MD5,omitempty" name:"X-QS-Encryption-Customer-Key-MD5" location:"headers"` + + // The request body + Body io.Reader `location:"body"` +} + +// Validate validates the input for UploadMultipart. +func (v *UploadMultipartInput) Validate() error { + + if v.PartNumber == nil { + return errors.ParameterRequiredError{ + ParameterName: "PartNumber", + ParentName: "UploadMultipartInput", + } + } + + if v.UploadID == nil { + return errors.ParameterRequiredError{ + ParameterName: "UploadID", + ParentName: "UploadMultipartInput", + } + } + + return nil +} + +// UploadMultipartOutput presents output for UploadMultipart. 
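+//
+// Illustrative caller-side sketch (editorial example, not generated SDK code);
+// it uploads one part of a previously initiated multipart upload and assumes
+// the Int/Int64/IntValue pointer helpers from convert_types.go:
+//
+//    part := []byte("...part data...")
+//    out, err := bucket.UploadMultipart("my-object", &UploadMultipartInput{
+//        UploadID:      initOutput.UploadID, // from InitiateMultipartUpload
+//        PartNumber:    Int(0),
+//        ContentLength: Int64(int64(len(part))),
+//        Body:          bytes.NewReader(part),
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Println(IntValue(out.StatusCode)) // 201 is the expected status code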
+type UploadMultipartOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/qingstor.go b/vendor/github.com/yunify/qingstor-sdk-go/service/qingstor.go new file mode 100644 index 000000000..7197b7816 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/qingstor.go @@ -0,0 +1,104 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +// Package service provides QingStor Service API (API Version 2016-01-06) +package service + +import ( + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +// Service QingStor provides low-cost and reliable online storage service with unlimited storage space, high read and write performance, high reliability and data safety, fine-grained access control, and easy to use API. +type Service struct { + Config *config.Config +} + +// Init initializes a new service. +func Init(c *config.Config) (*Service, error) { + return &Service{Config: c}, nil +} + +// ListBuckets does Retrieve the bucket list. +// Documentation URL: https://docs.qingcloud.com/qingstor/api/service/get.html +func (s *Service) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + r, x, err := s.ListBucketsRequest(input) + + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err +} + +// ListBucketsRequest creates request and output object of ListBuckets. +func (s *Service) ListBucketsRequest(input *ListBucketsInput) (*request.Request, *ListBucketsOutput, error) { + + if input == nil { + input = &ListBucketsInput{} + } + + o := &data.Operation{ + Config: s.Config, + APIName: "Get Service", + RequestMethod: "GET", + RequestURI: "/", + StatusCodes: []int{ + 200, // OK + }, + } + + x := &ListBucketsOutput{} + r, err := request.New(o, input, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil +} + +// ListBucketsInput presents input for ListBuckets. +type ListBucketsInput struct { + // Limits results to buckets that in the location + Location *string `json:"Location,omitempty" name:"Location" location:"headers"` +} + +// Validate validates the input for ListBuckets. +func (v *ListBucketsInput) Validate() error { + + return nil +} + +// ListBucketsOutput presents output for ListBuckets. 
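+//
+// Illustrative caller-side sketch (editorial example, not generated SDK code);
+// it initializes the service from an existing *config.Config and lists
+// buckets, optionally restricted to one zone via the Location header:
+//
+//    qsService, err := Init(cfg) // cfg comes from the config package
+//    if err != nil {
+//        // handle error
+//    }
+//    out, err := qsService.ListBuckets(&ListBucketsInput{
+//        Location: String("zone-id"), // hypothetical zone ID
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, b := range out.Buckets {
+//        fmt.Println(StringValue(b.Name), StringValue(b.URL))
+//    }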
+type ListBucketsOutput struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + // Buckets information + Buckets []*BucketType `json:"buckets,omitempty" name:"buckets" location:"elements"` + // Bucket count + Count *int `json:"count,omitempty" name:"count" location:"elements"` +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/service/types.go b/vendor/github.com/yunify/qingstor-sdk-go/service/types.go new file mode 100644 index 000000000..a57419d50 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/service/types.go @@ -0,0 +1,463 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package service + +import ( + "fmt" + "time" + + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +// Properties presents the service properties. +type Properties struct { + // Bucket name + BucketName *string `json:"bucket-name" name:"bucket-name"` // Required + // Object key + ObjectKey *string `json:"object-key" name:"object-key"` // Required + // QingCloud Zone ID + Zone *string `json:"zone" name:"zone"` +} + +// ACLType presents ACL. +type ACLType struct { + Grantee *GranteeType `json:"grantee" name:"grantee"` // Required + // Permission for this grantee + // Permission's available values: READ, WRITE, FULL_CONTROL + Permission *string `json:"permission" name:"permission"` // Required + +} + +// Validate validates the ACL. +func (v *ACLType) Validate() error { + + if v.Grantee != nil { + if err := v.Grantee.Validate(); err != nil { + return err + } + } + + if v.Grantee == nil { + return errors.ParameterRequiredError{ + ParameterName: "Grantee", + ParentName: "ACL", + } + } + + if v.Permission == nil { + return errors.ParameterRequiredError{ + ParameterName: "Permission", + ParentName: "ACL", + } + } + + if v.Permission != nil { + permissionValidValues := []string{"READ", "WRITE", "FULL_CONTROL"} + permissionParameterValue := fmt.Sprint(*v.Permission) + + permissionIsValid := false + for _, value := range permissionValidValues { + if value == permissionParameterValue { + permissionIsValid = true + } + } + + if !permissionIsValid { + return errors.ParameterValueNotAllowedError{ + ParameterName: "Permission", + ParameterValue: permissionParameterValue, + AllowedValues: permissionValidValues, + } + } + } + + return nil +} + +// BucketType presents Bucket. 
+type BucketType struct { + // Created time of the bucket + Created *time.Time `json:"created,omitempty" name:"created" format:"ISO 8601"` + // QingCloud Zone ID + Location *string `json:"location,omitempty" name:"location"` + // Bucket name + Name *string `json:"name,omitempty" name:"name"` + // URL to access the bucket + URL *string `json:"url,omitempty" name:"url"` +} + +// Validate validates the Bucket. +func (v *BucketType) Validate() error { + + return nil +} + +// ConditionType presents Condition. +type ConditionType struct { + IPAddress *IPAddressType `json:"ip_address,omitempty" name:"ip_address"` + IsNull *IsNullType `json:"is_null,omitempty" name:"is_null"` + NotIPAddress *NotIPAddressType `json:"not_ip_address,omitempty" name:"not_ip_address"` + StringLike *StringLikeType `json:"string_like,omitempty" name:"string_like"` + StringNotLike *StringNotLikeType `json:"string_not_like,omitempty" name:"string_not_like"` +} + +// Validate validates the Condition. +func (v *ConditionType) Validate() error { + + if v.IPAddress != nil { + if err := v.IPAddress.Validate(); err != nil { + return err + } + } + + if v.IsNull != nil { + if err := v.IsNull.Validate(); err != nil { + return err + } + } + + if v.NotIPAddress != nil { + if err := v.NotIPAddress.Validate(); err != nil { + return err + } + } + + if v.StringLike != nil { + if err := v.StringLike.Validate(); err != nil { + return err + } + } + + if v.StringNotLike != nil { + if err := v.StringNotLike.Validate(); err != nil { + return err + } + } + + return nil +} + +// CORSRuleType presents CORSRule. +type CORSRuleType struct { + // Allowed headers + AllowedHeaders []*string `json:"allowed_headers,omitempty" name:"allowed_headers"` + // Allowed methods + AllowedMethods []*string `json:"allowed_methods" name:"allowed_methods"` // Required + // Allowed origin + AllowedOrigin *string `json:"allowed_origin" name:"allowed_origin"` // Required + // Expose headers + ExposeHeaders []*string `json:"expose_headers,omitempty" name:"expose_headers"` + // Max age seconds + MaxAgeSeconds *int `json:"max_age_seconds,omitempty" name:"max_age_seconds"` +} + +// Validate validates the CORSRule. +func (v *CORSRuleType) Validate() error { + + if len(v.AllowedMethods) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "AllowedMethods", + ParentName: "CORSRule", + } + } + + if v.AllowedOrigin == nil { + return errors.ParameterRequiredError{ + ParameterName: "AllowedOrigin", + ParentName: "CORSRule", + } + } + + return nil +} + +// GranteeType presents Grantee. +type GranteeType struct { + // Grantee user ID + ID *string `json:"id,omitempty" name:"id"` + // Grantee group name + Name *string `json:"name,omitempty" name:"name"` + // Grantee type + // Type's available values: user, group + Type *string `json:"type" name:"type"` // Required + +} + +// Validate validates the Grantee. +func (v *GranteeType) Validate() error { + + if v.Type == nil { + return errors.ParameterRequiredError{ + ParameterName: "Type", + ParentName: "Grantee", + } + } + + if v.Type != nil { + typeValidValues := []string{"user", "group"} + typeParameterValue := fmt.Sprint(*v.Type) + + typeIsValid := false + for _, value := range typeValidValues { + if value == typeParameterValue { + typeIsValid = true + } + } + + if !typeIsValid { + return errors.ParameterValueNotAllowedError{ + ParameterName: "Type", + ParameterValue: typeParameterValue, + AllowedValues: typeValidValues, + } + } + } + + return nil +} + +// IPAddressType presents IPAddress. 
+type IPAddressType struct { + // Source IP + SourceIP []*string `json:"source_ip,omitempty" name:"source_ip"` +} + +// Validate validates the IPAddress. +func (v *IPAddressType) Validate() error { + + return nil +} + +// IsNullType presents IsNull. +type IsNullType struct { + // Refer url + Referer *bool `json:"Referer,omitempty" name:"Referer"` +} + +// Validate validates the IsNull. +func (v *IsNullType) Validate() error { + + return nil +} + +// KeyType presents Key. +type KeyType struct { + // Object created time + Created *time.Time `json:"created,omitempty" name:"created" format:"ISO 8601"` + // Whether this key is encrypted + Encrypted *bool `json:"encrypted,omitempty" name:"encrypted"` + // MD5sum of the object + Etag *string `json:"etag,omitempty" name:"etag"` + // Object key + Key *string `json:"key,omitempty" name:"key"` + // MIME type of the object + MimeType *string `json:"mime_type,omitempty" name:"mime_type"` + // Last modified time in unix time format + Modified *int `json:"modified,omitempty" name:"modified"` + // Object content size + Size *int64 `json:"size,omitempty" name:"size"` +} + +// Validate validates the Key. +func (v *KeyType) Validate() error { + + return nil +} + +// KeyDeleteErrorType presents KeyDeleteError. +type KeyDeleteErrorType struct { + // Error code + Code *string `json:"code,omitempty" name:"code"` + // Object key + Key *string `json:"key,omitempty" name:"key"` + // Error message + Message *string `json:"message,omitempty" name:"message"` +} + +// Validate validates the KeyDeleteError. +func (v *KeyDeleteErrorType) Validate() error { + + return nil +} + +// NotIPAddressType presents NotIPAddress. +type NotIPAddressType struct { + // Source IP + SourceIP []*string `json:"source_ip,omitempty" name:"source_ip"` +} + +// Validate validates the NotIPAddress. +func (v *NotIPAddressType) Validate() error { + + return nil +} + +// ObjectPartType presents ObjectPart. +type ObjectPartType struct { + // Object part created time + Created *time.Time `json:"created,omitempty" name:"created" format:"ISO 8601"` + // MD5sum of the object part + Etag *string `json:"etag,omitempty" name:"etag"` + // Object part number + PartNumber *int `json:"part_number" name:"part_number" default:"0"` // Required + // Object part size + Size *int64 `json:"size,omitempty" name:"size"` +} + +// Validate validates the ObjectPart. +func (v *ObjectPartType) Validate() error { + + if v.PartNumber == nil { + return errors.ParameterRequiredError{ + ParameterName: "PartNumber", + ParentName: "ObjectPart", + } + } + + return nil +} + +// OwnerType presents Owner. +type OwnerType struct { + // User ID + ID *string `json:"id,omitempty" name:"id"` + // Username + Name *string `json:"name,omitempty" name:"name"` +} + +// Validate validates the Owner. +func (v *OwnerType) Validate() error { + + return nil +} + +// StatementType presents Statement. +type StatementType struct { + // QingStor API methods + Action []*string `json:"action" name:"action"` // Required + Condition *ConditionType `json:"condition,omitempty" name:"condition"` + // Statement effect + // Effect's available values: allow, deny + Effect *string `json:"effect" name:"effect"` // Required + // Bucket policy id, must be unique + ID *string `json:"id" name:"id"` // Required + // The resources to apply bucket policy + Resource []*string `json:"resource,omitempty" name:"resource"` + // The user to apply bucket policy + User []*string `json:"user" name:"user"` // Required + +} + +// Validate validates the Statement. 
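+//
+// Illustrative sketch (editorial example, not generated SDK code) of building
+// a statement for a bucket policy; the action, user, and resource values are
+// hypothetical placeholders, only the field layout and the allow/deny effect
+// come from this file:
+//
+//    statement := &StatementType{
+//        ID:       String("allow-example-user-get"),
+//        User:     []*string{String("usr-example")},
+//        Action:   []*string{String("get_object")},
+//        Effect:   String("allow"),
+//        Resource: []*string{String("example-bucket/*")},
+//    }
+//    if err := statement.Validate(); err != nil {
+//        // handle invalid statement
+//    }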
+func (v *StatementType) Validate() error { + + if len(v.Action) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "Action", + ParentName: "Statement", + } + } + + if v.Condition != nil { + if err := v.Condition.Validate(); err != nil { + return err + } + } + + if v.Effect == nil { + return errors.ParameterRequiredError{ + ParameterName: "Effect", + ParentName: "Statement", + } + } + + if v.Effect != nil { + effectValidValues := []string{"allow", "deny"} + effectParameterValue := fmt.Sprint(*v.Effect) + + effectIsValid := false + for _, value := range effectValidValues { + if value == effectParameterValue { + effectIsValid = true + } + } + + if !effectIsValid { + return errors.ParameterValueNotAllowedError{ + ParameterName: "Effect", + ParameterValue: effectParameterValue, + AllowedValues: effectValidValues, + } + } + } + + if v.ID == nil { + return errors.ParameterRequiredError{ + ParameterName: "ID", + ParentName: "Statement", + } + } + + if len(v.User) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "User", + ParentName: "Statement", + } + } + + return nil +} + +// StringLikeType presents StringLike. +type StringLikeType struct { + // Refer url + Referer []*string `json:"Referer,omitempty" name:"Referer"` +} + +// Validate validates the StringLike. +func (v *StringLikeType) Validate() error { + + return nil +} + +// StringNotLikeType presents StringNotLike. +type StringNotLikeType struct { + // Refer url + Referer []*string `json:"Referer,omitempty" name:"Referer"` +} + +// Validate validates the StringNotLike. +func (v *StringNotLikeType) Validate() error { + + return nil +} + +// UploadsType presents Uploads. +type UploadsType struct { + // Object part created time + Created *time.Time `json:"created,omitempty" name:"created" format:"ISO 8601"` + // Object key + Key *string `json:"key,omitempty" name:"key"` + // Object upload id + UploadID *string `json:"upload_id,omitempty" name:"upload_id"` +} + +// Validate validates the Uploads. +func (v *UploadsType) Validate() error { + + return nil +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/template/manifest.json b/vendor/github.com/yunify/qingstor-sdk-go/template/manifest.json new file mode 100644 index 000000000..9d2f86f86 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/template/manifest.json @@ -0,0 +1,8 @@ +{ + "output": { + "file_naming": { + "style": "snake_case", + "extension": ".go" + } + } +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/template/service.tmpl b/vendor/github.com/yunify/qingstor-sdk-go/template/service.tmpl new file mode 100644 index 000000000..30da61566 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/template/service.tmpl @@ -0,0 +1,40 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +{{$service := .Data.Service}} + +// Package service provides {{$service.Name}} Service API (API Version {{$service.APIVersion}}) +package service + +import ( + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request" + "github.com/yunify/qingstor-sdk-go/request/data" +) + +{{if $service.Description}}// Service {{$service.Description}}{{end}} +type Service struct { + Config *config.Config +} + +// Init initializes a new service. +func Init(c *config.Config) (*Service, error) { + return &Service{Config: c}, nil +} + +{{range $_, $operation := $service.Operations}} + {{template "RenderOperation" passThrough $service $operation}} +{{end}} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/template/shared.tmpl b/vendor/github.com/yunify/qingstor-sdk-go/template/shared.tmpl new file mode 100644 index 000000000..96c19dfaa --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/template/shared.tmpl @@ -0,0 +1,390 @@ +{{define "Type"}} + {{- $typeName := index . 0 -}} + {{- $disablePointer := index . 1 -}} + + {{- if eq $typeName "string" -}} + {{- if not $disablePointer -}}*{{- end -}}string + {{- else if eq $typeName "boolean" -}} + {{- if not $disablePointer -}}*{{- end -}}bool + {{- else if eq $typeName "integer" -}} + {{- if not $disablePointer -}}*{{- end -}}int + {{- else if eq $typeName "long" -}} + {{- if not $disablePointer -}}*{{- end -}}int64 + {{- else if eq $typeName "timestamp" -}} + {{- if not $disablePointer -}}*{{- end -}}time.Time + {{- else if eq $typeName "binary" -}} + io.Reader + {{- else if eq $typeName "array" -}} + interface{} + {{- else if eq $typeName "object" -}} + interface{} + {{- else if eq $typeName "map" -}} + interface{} + {{- else if eq $typeName "any" -}} + interface{} + {{- else -}} + *{{$typeName | camelCase}}Type + {{- end -}} +{{end}} + +{{define "PropertyType"}} + {{- $property := index . 0 -}} + {{- $disablePointer := index . 1 -}} + + {{- if eq $property.Type "object" -}} + {{template "Type" passThrough $property.ExtraType $disablePointer}} + {{- else if eq $property.Type "array" -}} + []{{template "Type" passThrough $property.ExtraType $disablePointer}} + {{- else if eq $property.Type "map" -}} + map[string]{{template "Type" passThrough $property.ExtraType $disablePointer}} + {{- else if eq $property.Type "any" -}} + {{template "Type" passThrough $property.Type $disablePointer}} + {{- else -}} + {{template "Type" passThrough $property.Type $disablePointer}} + {{- end -}} +{{end}} + +{{define "PropertyTags"}} + {{- $property := . -}} + {{- if $property.IsRequired -}} + {{- printf `json:"%s"` ($property.Name | normalized) -}} + {{- else -}} + {{- printf `json:"%s,omitempty"` ($property.Name | normalized) -}} + {{- end -}} + {{- printf ` name:"%s"` ($property.Name | normalized) -}} + {{- if $property.Format}} + {{- printf ` format:"%s"` $property.Format -}} + {{- end -}} + {{- if $property.Default -}} + {{- printf ` default:"%s"` $property.Default -}} + {{- end -}} +{{end}} + +{{define "PropertyTagsDashConnected"}} + {{- $property := . -}} + {{- printf `json:"%s"` ($property.Name | dashConnected) -}} + {{- printf ` name:"%s"` ($property.Name | dashConnected) -}} +{{end}} + +{{define "PropertyExtraTags"}} + {{- $propertyExtraTags := . -}} + {{- if $propertyExtraTags -}} + {{- printf " %s" $propertyExtraTags -}} + {{- end -}} +{{end}} + +{{define "RenderProperties"}} + {{- $customizedType := index . 
0 -}} + {{- $propertyExtraTags := index . 1 -}} + {{- $operationName := index . 2 -}} + + {{range $_, $property := $customizedType.Properties -}} + {{if or (ne $operationName "Delete Multiple Objects") (ne $property.ID "Content-MD5") -}} + {{if $property.Description -}} + // {{$property.Description}} + {{end -}} + {{if $property.Enum -}} + // {{$property.ID | camelCase}}'s available values: {{$property.Enum | commaConnected}} + {{end -}} + {{$property.ID | camelCase | upperFirst}}{{" " -}} + {{template "PropertyType" passThrough $property false}}{{" " -}} + `{{template "PropertyTags" $property}}{{template "PropertyExtraTags" $propertyExtraTags}}`{{" " -}} + {{if $property.IsRequired -}} + // Required + {{- end}} + {{- end}} + {{end}} +{{end}} + +{{define "RenderOperation"}} + {{$service := index . 0}} + {{$operation := index . 1}} + + {{$belongs := replace $service.Name "QingStor" "Service" -1}} + {{$belongs := replace $belongs "Object" "Bucket" -1}} + {{$opID := $operation.ID | camelCase}} + + {{$isBucket := eq $service.Name "Bucket"}} + {{$isObject := eq $service.Name "Object"}} + + {{$hasParams := gt (len $operation.Request.Params.Properties) 0}} + {{$hasHeaders := gt (len $operation.Request.Headers.Properties) 0}} + {{$hasElements := gt (len $operation.Request.Elements.Properties) 0}} + {{$hasStringBody := eq $operation.Request.Body.Type "string"}} + {{$hasBinaryBody := eq $operation.Request.Body.Type "binary"}} + {{$hasInput := or $hasParams $hasHeaders $hasElements $hasStringBody $hasBinaryBody}} + + {{if $operation.Description -}} + {{if eq $belongs "Bucket" -}} + // {{replace $opID "Bucket" "" -1}} does {{$operation.Description}} + {{else -}} + // {{$opID}} does {{$operation.Description}} + {{end -}} + {{end -}} + {{if $operation.DocumentationURL -}} + // Documentation URL: {{$operation.DocumentationURL}} + {{- end}} + {{if eq $belongs "Bucket" -}} + func (s *{{$belongs}}) {{replace $opID "Bucket" "" -1 -}}( + {{- if $isObject}}objectKey string,{{end -}} + {{- if $hasInput}}input *{{$opID}}Input{{end -}} + ) (*{{$opID}}Output, error) { + {{else -}} + func (s *{{$belongs}}) {{$opID}}( + {{- if $hasInput}}input *{{$opID}}Input{{end -}} + ) (*{{$opID}}Output, error) { + {{end -}} + {{if eq $belongs "Bucket" -}} + r, x, err := s.{{replace $opID "Bucket" "" -1}}Request( + {{- if $isObject}}objectKey,{{end -}} + {{- if $hasInput}}input{{end -}} + ) + {{else -}} + r, x, err := s.{{$opID}}Request( + {{- if $hasInput}}input{{end -}} + ) + {{end}} + if err != nil { + return x, err + } + + err = r.Send() + if err != nil { + return nil, err + } + + requestID := r.HTTPResponse.Header.Get("X-Qs-Request-Id") + x.RequestID = &requestID + + return x, err + } + + {{if $operation.Description -}} + {{if eq $belongs "Bucket" -}} + // {{replace $opID "Bucket" "" -1}}Request creates request and output object of {{$opID}}. + {{else -}} + // {{$opID}}Request creates request and output object of {{$opID}}. 
+ {{end -}} + {{end -}} + {{if eq $belongs "Bucket" -}} + func (s *{{$belongs}}) {{replace $opID "Bucket" "" -1 -}}Request( + {{- if $isObject}}objectKey string,{{end -}} + {{- if $hasInput}}input *{{$opID}}Input{{end -}} + ) (*request.Request, *{{$opID}}Output, error) { + {{else -}} + func (s *{{$belongs}}) {{$opID}}Request( + {{- if $hasInput}}input *{{$opID}}Input{{end -}} + ) (*request.Request, *{{$opID}}Output, error) { + {{end -}} + {{if $hasInput}} + if input == nil { + input = &{{$opID}}Input{} + } + {{end}} + {{$uri := $operation.Request.URI}} + {{$uri := replace $uri "{" "<" -1}} + {{$uri := replace $uri "}" ">" -1}} + {{$uri := dashConnected $uri}} + + o := &data.Operation{ + Config: s.Config, + {{- if ne $belongs "Service"}} + Properties: s.Properties, + {{- end}} + APIName: "{{$operation.Name}}", + RequestMethod: "{{$operation.Request.Method}}", + RequestURI: "{{$uri}}", + {{if $operation.Response.StatusCodes -}} + StatusCodes: []int{ + {{range $statusCodeNumber, $statusCode := $operation.Response.StatusCodes -}} + {{$statusCodeNumber}}, + {{- if $statusCode.Description -}} + // {{$statusCode.Description}} + {{- end}} + {{end -}} + }, + {{else}} + StatusCodes: []int{ + 200, // OK + }, + {{end -}} + } + + {{if eq $service.Name "Object"}} + s.Properties.ObjectKey = &objectKey + {{end}} + + x := &{{$opID}}Output{} + r, err := request.New(o, {{if $hasInput}}input{{else}}nil{{end}}, x) + if err != nil { + return nil, nil, err + } + + return r, x, nil + } + + {{if $hasInput}} + // {{$opID}}Input presents input for {{$opID}}. + type {{$opID}}Input struct { + {{- if $operation.Request.Params.Properties | len}} + {{$data := $operation.Request.Params -}} + {{template "RenderProperties" passThrough $data `location:"params"` $operation.Name}} + {{end}} + + {{- if $operation.Request.Headers.Properties | len}} + {{$data := $operation.Request.Headers -}} + {{template "RenderProperties" passThrough $data `location:"headers"` $operation.Name}} + {{end}} + + {{- if $operation.Request.Elements.Properties | len}} + {{$data := $operation.Request.Elements -}} + {{template "RenderProperties" passThrough $data `location:"elements"` $operation.Name}} + {{end}} + + {{- if eq $operation.Request.Body.Type "string"}} + {{if $operation.Request.Body.Description -}} + // {{$operation.Request.Body.Description}} + {{- end}} + Body string `location:"body"` + {{else if eq $operation.Request.Body.Type "binary"}} + {{if $operation.Request.Body.Description -}} + // {{$operation.Request.Body.Description}} + {{- end}} + Body io.Reader `location:"body"` + {{end}} + } + + // Validate validates the input for {{$opID}}. + func (v *{{$opID}}Input) Validate() error { + {{template "ValidateCustomizedType" passThrough $operation.Request.Params $operation.Name}} + {{template "ValidateCustomizedType" passThrough $operation.Request.Headers $operation.Name}} + {{template "ValidateCustomizedType" passThrough $operation.Request.Elements $operation.Name}} + + return nil + } + {{end}} + + // {{$opID}}Output presents output for {{$opID}}. 
+ type {{$opID}}Output struct { + StatusCode *int `location:"statusCode"` + + RequestID *string `location:"requestID"` + + {{if eq $operation.Response.Body.Type "string"}} + {{if $operation.Response.Body.Description -}} + // {{$operation.Response.Body.Description}} + {{- end}} + Body string `location:"body"` + {{else if eq $operation.Response.Body.Type "binary"}} + {{if $operation.Response.Body.Description -}} + // {{$operation.Response.Body.Description}} + {{- end}} + Body io.ReadCloser `location:"body"` + {{end}} + + {{if $operation.Response.Elements.Properties | len}} + {{$data := $operation.Response.Elements}} + {{template "RenderProperties" passThrough $data `location:"elements"` $operation.Name}} + {{end}} + + {{if $operation.Response.Headers.Properties | len}} + {{$data := $operation.Response.Headers}} + {{template "RenderProperties" passThrough $data `location:"headers"` $operation.Name}} + {{end}} + } +{{end}} + +{{define "SubServiceInitParams"}} + {{- $customizedType := index . 0 -}} + {{- $disablePointer := index . 1 -}} + + {{- range $_, $property := $customizedType.Properties -}} + {{$property.ID | camelCase | lowerFirstWord}}{{" " -}} + {{template "PropertyType" passThrough $property $disablePointer}}, + {{- end -}} +{{end}} + +{{define "ValidateCustomizedType"}} + {{$customizedType := index . 0}} + {{$operationName := index . 1}} + + {{range $_, $property := $customizedType.Properties}} + {{if or (ne $operationName "Delete Multiple Objects") (ne $property.ID "Content-MD5") -}} + {{$isNormalType := or (eq $property.Type "string") (eq $property.Type "integer")}} + {{$isContentLength := eq $property.ID "Content-Length"}} + {{if and $isNormalType (not $isContentLength) }} + {{if $property.IsRequired }} + if v.{{$property.ID | camelCase}} == nil { + return errors.ParameterRequiredError{ + ParameterName: "{{$property.ID | camelCase}}", + ParentName: "{{$customizedType.ID | camelCase}}", + } + } + {{end}} + {{$parameterName := $property.ID | camelCase | lowerFirstWord}} + {{if gt ($property.Enum | len) 0}} + if v.{{$property.ID | camelCase}} != nil { + {{$parameterName}}ValidValues := []string{ + {{- $property.Enum | commaConnectedWithQuote -}} + } + {{$parameterName}}ParameterValue := fmt.Sprint(*v.{{$property.ID | camelCase}}) + + {{$parameterName}}IsValid := false + for _, value := range {{$parameterName}}ValidValues { + if value == {{$parameterName}}ParameterValue { + {{$parameterName}}IsValid = true + } + } + + if !{{$parameterName}}IsValid { + return errors.ParameterValueNotAllowedError{ + ParameterName: "{{$property.ID | camelCase}}", + ParameterValue: {{$parameterName}}ParameterValue, + AllowedValues: {{$parameterName}}ValidValues, + } + } + } + {{end}} + {{end}} + + {{if eq $property.Type "object"}} + if v.{{$property.ID | camelCase}} != nil { + if err := v.{{$property.ID | camelCase}}.Validate(); err != nil { + return err + } + } + {{if $property.IsRequired }} + if v.{{$property.ID | camelCase}} == nil { + return errors.ParameterRequiredError{ + ParameterName: "{{$property.ID | camelCase}}", + ParentName: "{{$customizedType.ID | camelCase}}", + } + } + {{end}} + {{end}} + + {{if eq $property.Type "array"}} + {{if $property.IsRequired}} + if len(v.{{$property.ID | camelCase}}) == 0 { + return errors.ParameterRequiredError{ + ParameterName: "{{$property.ID | camelCase}}", + ParentName: "{{$customizedType.ID | camelCase}}", + } + } + {{end}} + {{$isNotString := ne $property.ExtraType "string"}} + {{$isNotInteger := ne $property.ExtraType "integer"}} + {{$isNotTimestamp := ne 
$property.ExtraType "timestamp"}} + {{if and $isNotString $isNotInteger $isNotTimestamp}} + if len(v.{{$property.ID | camelCase}}) > 0 { + for _, property := range v.{{$property.ID | camelCase}} { + if err := property.Validate(); err != nil { + return err + } + } + } + {{end}} + {{end}} + {{end}} + {{end}} +{{end}} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/template/sub_service.tmpl b/vendor/github.com/yunify/qingstor-sdk-go/template/sub_service.tmpl new file mode 100644 index 000000000..9184919bf --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/template/sub_service.tmpl @@ -0,0 +1,61 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +{{$service := .Data.Service}} +{{$subService := index .Data.SubServices .CurrentSubServiceID}} + +package service + +import ( + "fmt" + "io" + "time" + + "github.com/yunify/qingstor-sdk-go/config" + "github.com/yunify/qingstor-sdk-go/request" + "github.com/yunify/qingstor-sdk-go/request/data" + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +var _ fmt.State +var _ io.Reader +var _ time.Time +var _ config.Config + +{{if ne $subService.Name "Object"}} + // {{$subService.ID | camelCase}} presents {{$subService.ID | snakeCase}}. + type {{$subService.ID | camelCase}} struct { + Config *config.Config + Properties *Properties + } + + // {{$subService.ID | camelCase}} initializes a new {{$subService.ID | snakeCase}}. + func (s *Service) {{$subService.ID | camelCase}}( + {{- template "SubServiceInitParams" passThrough $subService.Properties true -}} + ) (*{{$subService.ID | camelCase}}, error) { + properties := &Properties{ + {{range $_, $property := $subService.Properties.Properties -}} + {{$property.ID | upperFirst}}: &{{$property.ID}}, + {{end -}} + } + + return &{{$subService.ID | camelCase}}{Config: s.Config, Properties: properties}, nil + } +{{end}} + +{{range $_, $operation := $subService.Operations}} + {{template "RenderOperation" passThrough $subService $operation}} +{{end}} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/template/types.tmpl b/vendor/github.com/yunify/qingstor-sdk-go/template/types.tmpl new file mode 100644 index 000000000..a2e495fbe --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/template/types.tmpl @@ -0,0 +1,62 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. 
+// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +{{$service := .Data.Service}} +{{$objectSubService := index .Data.SubServices "Object"}} +{{$customizedTypes := .Data.CustomizedTypes}} + +package service + +import ( + "fmt" + "time" + + "github.com/yunify/qingstor-sdk-go/request/errors" +) + +// Properties presents the service properties. +type Properties struct { + {{- template "RenderProperties" passThrough $service.Properties "" "" -}} + + {{range $_, $p := $objectSubService.Properties.Properties -}} + {{- if $p.Description -}} + // {{$p.Description}} + {{end -}} + {{if $p.Enum -}} + // {{camelCase $p.ID}}'s available values: {{commaConnected $p.Enum}} + {{end -}} + {{$p.ID | camelCase | upperFirst}}{{" " -}} + {{template "PropertyType" passThrough $p false}}{{" " -}} + `{{template "PropertyTagsDashConnected" $p}}`{{" " -}} + {{if $p.IsRequired -}} + // Required + {{- end}} + {{end}} +} + +{{range $_, $customizedType := $customizedTypes}} + // {{$customizedType.ID | camelCase}}Type presents {{$customizedType.ID | camelCase}}. + type {{$customizedType.ID | camelCase}}Type struct { + {{template "RenderProperties" passThrough $customizedType "" ""}} + } + + // Validate validates the {{$customizedType.ID | camelCase}}. + func (v *{{$customizedType.ID | camelCase}}Type) Validate() error { + {{template "ValidateCustomizedType" passThrough $customizedType ""}} + + return nil + } +{{end}} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/.gitignore b/vendor/github.com/yunify/qingstor-sdk-go/test/.gitignore new file mode 100644 index 000000000..4e3825eda --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/.gitignore @@ -0,0 +1,3 @@ +# Ignore local config +config.yaml +test_config.yaml diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/bucket.go b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket.go new file mode 100644 index 000000000..3b829228c --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket.go @@ -0,0 +1,269 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package main + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/DATA-DOG/godog" + "github.com/DATA-DOG/godog/gherkin" + + qsErrors "github.com/yunify/qingstor-sdk-go/request/errors" + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// BucketFeatureContext provides feature context for bucket. +func BucketFeatureContext(s *godog.Suite) { + s.Step(`^initialize the bucket$`, initializeTheBucket) + s.Step(`^the bucket is initialized$`, theBucketIsInitialized) + + s.Step(`^put bucket$`, putBucketFake) + s.Step(`^put bucket status code is (\d+)$`, putBucketStatusCodeIsFake) + s.Step(`^put same bucket again$`, putSameBucketAgain) + s.Step(`^put same bucket again status code is (\d+)$$`, putSameBucketAgainStatusCodeIs) + + s.Step(`^list objects$`, listObjects) + s.Step(`^list objects status code is (\d+)$`, listObjectsStatusCodeIs) + s.Step(`^list objects keys count is (\d+)$`, listObjectsKeysCountIs) + + s.Step(`^head bucket$`, headBucket) + s.Step(`^head bucket status code is (\d+)$`, headBucketStatusCodeIs) + + s.Step(`^delete bucket$`, deleteBucketFake) + s.Step(`^delete bucket status code is (\d+)$`, deleteBucketStatusCodeIsFake) + + s.Step(`^delete multiple objects:$`, deleteMultipleObjects) + s.Step(`^delete multiple objects code is (\d+)$`, deleteMultipleObjectsCodeIs) + + s.Step(`^get bucket statistics$`, getBucketStatistics) + s.Step(`^get bucket statistics status code is (\d+)$`, getBucketStatisticsStatusCodeIs) + s.Step(`^get bucket statistics status is "([^"]*)"$`, getBucketStatisticsStatusIs) + + s.Step(`^an object created by initiate multipart upload$`, anObjectCreatedByInitiateMultipartUpload) + s.Step(`^list multipart uploads$`, listMultipartUploads) + s.Step(`^list multipart uploads count is (\d+)$`, listMultipartUploadsCountIs) + s.Step(`^list multipart uploads with prefix$`, listMultipartUploadsWithPrefix) + s.Step(`^list multipart uploads with prefix count is (\d+)$`, listMultipartUploadsWithPrefixCountIs) +} + +// -------------------------------------------------------------------------- + +var bucket *qs.Bucket + +func initializeTheBucket() error { + bucket, err = qsService.Bucket(tc.BucketName, tc.Zone) + return err +} + +func theBucketIsInitialized() error { + if bucket == nil { + return errors.New("Bucket is not initialized") + } + return nil +} + +// -------------------------------------------------------------------------- + +var putBucketOutput *qs.PutBucketOutput + +func putBucket() error { + putBucketOutput, err = bucket.Put() + return err +} + +func putBucketFake() error { + return nil +} + +func putBucketStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putBucketOutput.StatusCode), statusCode) +} + +func putBucketStatusCodeIsFake(_ int) error { + return nil +} + +// -------------------------------------------------------------------------- + +func putSameBucketAgain() error { + _, err = bucket.Put() + return nil +} + +func putSameBucketAgainStatusCodeIs(statusCode int) error { + switch e := err.(type) { + case *qsErrors.QingStorError: + return checkEqual(e.StatusCode, statusCode) + } + + return fmt.Errorf("put same bucket again should get \"%d\"", statusCode) +} + +// -------------------------------------------------------------------------- + +var listObjectsOutput *qs.ListObjectsOutput + +func listObjects() error { + listObjectsOutput, err = bucket.ListObjects(&qs.ListObjectsInput{ + Delimiter: qs.String("/"), + Limit: qs.Int(1000), + Prefix: 
qs.String("Test/"), + Marker: qs.String("Next"), + }) + return err +} + +func listObjectsStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(listObjectsOutput.StatusCode), statusCode) +} + +func listObjectsKeysCountIs(count int) error { + return checkEqual(len(listObjectsOutput.Keys), count) +} + +// -------------------------------------------------------------------------- + +var headBucketOutput *qs.HeadBucketOutput + +func headBucket() error { + headBucketOutput, err = bucket.Head() + return err +} + +func headBucketStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(headBucketOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var deleteBucketOutput *qs.DeleteBucketOutput + +func deleteBucket() error { + deleteBucketOutput, err = bucket.Delete() + return err +} + +func deleteBucketFake() error { + return nil +} + +func deleteBucketStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteBucketOutput.StatusCode), statusCode) +} + +func deleteBucketStatusCodeIsFake(_ int) error { + return nil +} + +// -------------------------------------------------------------------------- + +var deleteMultipleObjectsOutput *qs.DeleteMultipleObjectsOutput + +func deleteMultipleObjects(requestJSON *gherkin.DocString) error { + _, err := bucket.PutObject("object_0", nil) + if err != nil { + return err + } + _, err = bucket.PutObject("object_1", nil) + if err != nil { + return err + } + _, err = bucket.PutObject("object_2", nil) + if err != nil { + return err + } + + deleteMultipleObjectsInput := &qs.DeleteMultipleObjectsInput{} + err = json.Unmarshal([]byte(requestJSON.Content), deleteMultipleObjectsInput) + if err != nil { + return err + } + + deleteMultipleObjectsOutput, err = bucket.DeleteMultipleObjects( + &qs.DeleteMultipleObjectsInput{ + Objects: deleteMultipleObjectsInput.Objects, + Quiet: deleteMultipleObjectsInput.Quiet, + }, + ) + return err +} + +func deleteMultipleObjectsCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteMultipleObjectsOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getBucketStatisticsOutput *qs.GetBucketStatisticsOutput + +func getBucketStatistics() error { + getBucketStatisticsOutput, err = bucket.GetStatistics() + return err +} + +func getBucketStatisticsStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getBucketStatisticsOutput.StatusCode), statusCode) +} + +func getBucketStatisticsStatusIs(status string) error { + return checkEqual(qs.StringValue(getBucketStatisticsOutput.Status), status) +} + +// -------------------------------------------------------------------------- +var listMultipartUploadsOutputObjectKey = "list_multipart_uploads_object_key" +var listMultipartUploadsInitiateOutput *qs.InitiateMultipartUploadOutput +var listMultipartUploadsOutput *qs.ListMultipartUploadsOutput + +func anObjectCreatedByInitiateMultipartUpload() error { + listMultipartUploadsInitiateOutput, err = bucket.InitiateMultipartUpload( + listMultipartUploadsOutputObjectKey, nil, + ) + return err +} + +func listMultipartUploads() error { + listMultipartUploadsOutput, err = bucket.ListMultipartUploads(nil) + return err +} + +func listMultipartUploadsCountIs(count int) error { + return checkEqual(len(listMultipartUploadsOutput.Uploads), count) +} + +func listMultipartUploadsWithPrefix() error { + listMultipartUploadsOutput, err = bucket.ListMultipartUploads( + 
&qs.ListMultipartUploadsInput{ + Prefix: qs.String(listMultipartUploadsOutputObjectKey), + }, + ) + return err +} + +func listMultipartUploadsWithPrefixCountIs(count int) error { + _, err = bucket.AbortMultipartUpload( + listMultipartUploadsOutputObjectKey, &qs.AbortMultipartUploadInput{ + UploadID: listMultipartUploadsInitiateOutput.UploadID, + }, + ) + if err != nil { + return err + } + + return checkEqual(len(listMultipartUploadsOutput.Uploads), count) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_acl.go b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_acl.go new file mode 100644 index 000000000..5971e9594 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_acl.go @@ -0,0 +1,79 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/DATA-DOG/godog" + "github.com/DATA-DOG/godog/gherkin" + + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// BucketACLFeatureContext provides feature context for bucket ACL. 
+func BucketACLFeatureContext(s *godog.Suite) { + s.Step(`^put bucket ACL:$`, putBucketACL) + s.Step(`^put bucket ACL status code is (\d+)$`, putBucketACLStatusCodeIs) + + s.Step(`^get bucket ACL$`, getBucketACL) + s.Step(`^get bucket ACL status code is (\d+)$`, getBucketACLStatusCodeIs) + s.Step(`^get bucket ACL should have grantee name "([^"]*)"$`, getBucketACLShouldHaveGranteeName) +} + +// -------------------------------------------------------------------------- + +var putBucketACLOutput *qs.PutBucketACLOutput + +func putBucketACL(ACLJSONText *gherkin.DocString) error { + putBucketACLInput := &qs.PutBucketACLInput{} + err = json.Unmarshal([]byte(ACLJSONText.Content), putBucketACLInput) + if err != nil { + return err + } + + putBucketACLOutput, err = bucket.PutACL(putBucketACLInput) + return err +} + +func putBucketACLStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putBucketACLOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getBucketACLOutput *qs.GetBucketACLOutput + +func getBucketACL() error { + getBucketACLOutput, err = bucket.GetACL() + return err +} + +func getBucketACLStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getBucketACLOutput.StatusCode), statusCode) +} + +func getBucketACLShouldHaveGranteeName(name string) error { + for _, ACL := range getBucketACLOutput.ACL { + if qs.StringValue(ACL.Grantee.Name) == name { + return nil + } + } + + return fmt.Errorf("Grantee name \"%s\" not found in bucket ACLs", name) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_cors.go b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_cors.go new file mode 100644 index 000000000..419648554 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_cors.go @@ -0,0 +1,95 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/DATA-DOG/godog" + "github.com/DATA-DOG/godog/gherkin" + + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// BucketCORSFeatureContext provides feature context for bucket CORS. 
+func BucketCORSFeatureContext(s *godog.Suite) { + s.Step(`^put bucket CORS:$`, putBucketCORS) + s.Step(`^put bucket CORS status code is (\d+)$`, putBucketCORSStatusCodeIs) + + s.Step(`^get bucket CORS$`, getBucketCORS) + s.Step(`^get bucket CORS status code is (\d+)$`, getBucketCORSStatusCodeIs) + s.Step(`^get bucket CORS should have allowed origin "([^"]*)"$`, getBucketCORSShouldHaveAllowedOrigin) + + s.Step(`^delete bucket CORS`, deleteBucketCORS) + s.Step(`^delete bucket CORS status code is (\d+)$`, deleteBucketCORSStatusCodeIs) +} + +// -------------------------------------------------------------------------- + +var putBucketCORSOutput *qs.PutBucketCORSOutput + +func putBucketCORS(CORSJSONText *gherkin.DocString) error { + putBucketCORSInput := &qs.PutBucketCORSInput{} + err = json.Unmarshal([]byte(CORSJSONText.Content), putBucketCORSInput) + if err != nil { + return err + } + + putBucketCORSOutput, err = bucket.PutCORS(putBucketCORSInput) + return err +} + +func putBucketCORSStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putBucketCORSOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getBucketCORSOutput *qs.GetBucketCORSOutput + +func getBucketCORS() error { + getBucketCORSOutput, err = bucket.GetCORS() + return err +} + +func getBucketCORSStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getBucketCORSOutput.StatusCode), statusCode) +} + +func getBucketCORSShouldHaveAllowedOrigin(origin string) error { + for _, CORSRule := range getBucketCORSOutput.CORSRules { + if qs.StringValue(CORSRule.AllowedOrigin) == origin { + return nil + } + } + + return fmt.Errorf("Allowed origin \"%s\" not found in bucket CORS rules", origin) +} + +// -------------------------------------------------------------------------- + +var deleteBucketCORSOutput *qs.DeleteBucketCORSOutput + +func deleteBucketCORS() error { + deleteBucketCORSOutput, err = bucket.DeleteCORS() + return err +} + +func deleteBucketCORSStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteBucketCORSOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_external_mirror.go b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_external_mirror.go new file mode 100644 index 000000000..5c3880eaa --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_external_mirror.go @@ -0,0 +1,88 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package main + +import ( + "encoding/json" + + "github.com/DATA-DOG/godog" + "github.com/DATA-DOG/godog/gherkin" + + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// BucketExternalMirrorFeatureContext provides feature context for bucket external mirror. +func BucketExternalMirrorFeatureContext(s *godog.Suite) { + s.Step(`^put bucket external mirror:$`, putBucketExternalMirror) + s.Step(`^put bucket external mirror status code is (\d+)$`, putBucketExternalMirrorStatusCodeIs) + + s.Step(`^get bucket external mirror$`, getBucketExternalMirror) + s.Step(`^get bucket external mirror status code is (\d+)$`, getBucketExternalMirrorStatusCodeIs) + s.Step(`^get bucket external mirror should have source_site "([^"]*)"$`, getBucketExternalMirrorShouldHaveSourceSite) + + s.Step(`^delete bucket external mirror$`, deleteBucketExternalMirror) + s.Step(`^delete bucket external mirror status code is (\d+)$`, deleteBucketExternalMirrorStatusCodeIs) +} + +// -------------------------------------------------------------------------- + +var putBucketExternalMirrorOutput *qs.PutBucketExternalMirrorOutput + +func putBucketExternalMirror(ExternalMirrorJSONText *gherkin.DocString) error { + putBucketExternalMirrorInput := &qs.PutBucketExternalMirrorInput{} + err = json.Unmarshal([]byte(ExternalMirrorJSONText.Content), putBucketExternalMirrorInput) + if err != nil { + return err + } + + putBucketExternalMirrorOutput, err = bucket.PutExternalMirror(putBucketExternalMirrorInput) + return err +} + +func putBucketExternalMirrorStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putBucketExternalMirrorOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getBucketExternalMirrorOutput *qs.GetBucketExternalMirrorOutput + +func getBucketExternalMirror() error { + getBucketExternalMirrorOutput, err = bucket.GetExternalMirror() + return err +} + +func getBucketExternalMirrorStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getBucketExternalMirrorOutput.StatusCode), statusCode) +} + +func getBucketExternalMirrorShouldHaveSourceSite(sourceSite string) error { + return checkEqual(qs.StringValue(getBucketExternalMirrorOutput.SourceSite), sourceSite) +} + +// -------------------------------------------------------------------------- + +var deleteBucketExternalMirrorOutput *qs.DeleteBucketExternalMirrorOutput + +func deleteBucketExternalMirror() error { + deleteBucketExternalMirrorOutput, err = bucket.DeleteExternalMirror() + return err +} + +func deleteBucketExternalMirrorStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteBucketExternalMirrorOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_policy.go b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_policy.go new file mode 100644 index 000000000..4e2e99e79 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/bucket_policy.go @@ -0,0 +1,105 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. 
+// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package main + +import ( + "encoding/json" + "fmt" + + "github.com/DATA-DOG/godog" + "github.com/DATA-DOG/godog/gherkin" + + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// BucketPolicyFeatureContext provides feature context for bucket policy. +func BucketPolicyFeatureContext(s *godog.Suite) { + s.Step(`^put bucket policy:$`, putBucketPolicy) + s.Step(`^put bucket policy status code is (\d+)$`, putBucketPolicyStatusCodeIs) + + s.Step(`^get bucket policy$`, getBucketPolicy) + s.Step(`^get bucket policy status code is (\d+)$`, getBucketPolicyStatusCodeIs) + s.Step(`^get bucket policy should have Referer "([^"]*)"$`, getBucketPolicyShouldHaveReferer) + + s.Step(`^delete bucket policy$`, deleteBucketPolicy) + s.Step(`^delete bucket policy status code is (\d+)$`, deleteBucketPolicyStatusCodeIs) +} + +// -------------------------------------------------------------------------- + +var putBucketPolicyOutput *qs.PutBucketPolicyOutput + +func putBucketPolicy(PolicyJSONText *gherkin.DocString) error { + putBucketPolicyInput := &qs.PutBucketPolicyInput{} + err = json.Unmarshal([]byte(PolicyJSONText.Content), putBucketPolicyInput) + if err != nil { + return err + } + + if len(putBucketPolicyInput.Statement) == 1 { + putBucketPolicyInput.Statement[0].Resource = qs.StringSlice([]string{tc.BucketName + "/*"}) + } + + putBucketPolicyOutput, err = bucket.PutPolicy(putBucketPolicyInput) + return err +} + +func putBucketPolicyStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putBucketPolicyOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getBucketPolicyOutput *qs.GetBucketPolicyOutput + +func getBucketPolicy() error { + getBucketPolicyOutput, err = bucket.GetPolicy() + return err +} + +func getBucketPolicyStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getBucketPolicyOutput.StatusCode), statusCode) +} + +func getBucketPolicyShouldHaveReferer(compare string) error { + for _, statement := range getBucketPolicyOutput.Statement { + if statement.Condition != nil && + statement.Condition.StringLike != nil { + + for _, referer := range statement.Condition.StringLike.Referer { + if qs.StringValue(referer) == compare { + return nil + } + } + } + } + + return fmt.Errorf("Referer \"%s\" not found in bucket policy statement", compare) +} + +// -------------------------------------------------------------------------- + +var deleteBucketPolicyOutput *qs.DeleteBucketPolicyOutput + +func deleteBucketPolicy() error { + deleteBucketPolicyOutput, err = bucket.DeletePolicy() + return err +} + +func deleteBucketPolicyStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteBucketPolicyOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/config.yaml.example b/vendor/github.com/yunify/qingstor-sdk-go/test/config.yaml.example new file mode 100644 index 000000000..7c8d4f5c7 --- /dev/null +++ 
b/vendor/github.com/yunify/qingstor-sdk-go/test/config.yaml.example @@ -0,0 +1,15 @@ +# QingStor services configuration + +access_key_id: ACCESS_KEY_ID +secret_access_key: SECRET_ACCESS_KEY + +host: qingstor.com +port: 443 +protocol: https +connection_retries: 3 + +# Additional User-Agent +additional_user_agent: "test/integration" + +# Valid log levels are "debug", "info", "warn", "error", and "fatal". +log_level: debug diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/glide.lock b/vendor/github.com/yunify/qingstor-sdk-go/test/glide.lock new file mode 100644 index 000000000..1816aaa2d --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/glide.lock @@ -0,0 +1,9 @@ +hash: a498153ecdb880e9d2a81849ffae311e03bf11570e30d28f64d396d532a066c6 +updated: 2017-05-22T13:59:32.427609656+08:00 +imports: +- name: github.com/DATA-DOG/godog + version: 623ff9946e5c72834c19a019a153b8e3a338a52c + subpackages: + - colors + - gherkin +testImports: [] diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/glide.yaml b/vendor/github.com/yunify/qingstor-sdk-go/test/glide.yaml new file mode 100644 index 000000000..787386e9b --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/glide.yaml @@ -0,0 +1,4 @@ +package: github.com/yunify/qingstor-sdk-go/test +import: +- package: github.com/DATA-DOG/godog + version: v0.6.0 diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/main.go b/vendor/github.com/yunify/qingstor-sdk-go/test/main.go new file mode 100644 index 000000000..62e4e42ac --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/main.go @@ -0,0 +1,138 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package main + +import ( + "gopkg.in/yaml.v2" + "io/ioutil" + "os" + "time" + + "github.com/DATA-DOG/godog" + + "github.com/yunify/qingstor-sdk-go/config" + qsErrors "github.com/yunify/qingstor-sdk-go/request/errors" + qs "github.com/yunify/qingstor-sdk-go/service" +) + +func main() { + setUp() + + context := func(s *godog.Suite) { + ServiceFeatureContext(s) + BucketFeatureContext(s) + BucketACLFeatureContext(s) + BucketCORSFeatureContext(s) + BucketPolicyFeatureContext(s) + BucketExternalMirrorFeatureContext(s) + ObjectFeatureContext(s) + ObjectMultipartFeatureContext(s) + } + options := godog.Options{ + Format: "pretty", + Paths: []string{"./features"}, + Tags: "", + } + status := godog.RunWithOptions("*", context, options) + + //tearDown() + + os.Exit(status) +} + +func setUp() { + loadTestConfig() + loadConfig() + initQingStorService() + + err := initializeTheBucket() + checkErrorForExit(err) + + err = theBucketIsInitialized() + checkErrorForExit(err) + + //err = putBucket() + //checkError(err) + + //err = putBucketStatusCodeIs(201) + //checkError(err) +} + +func tearDown() { + err := deleteBucket() + checkError(err) + + err = deleteBucketStatusCodeIs(204) + checkError(err) + + retries := 0 + for retries < tc.MaxRetries { + deleteBucketOutput, err = bucket.Delete() + checkError(err) + if err != nil { + switch e := err.(type) { + case *qsErrors.QingStorError: + if e.Code == "bucket_not_exists" { + return + } + } + } + retries++ + time.Sleep(time.Second * time.Duration(tc.RetryWaitTime)) + } +} + +var err error +var tc *testConfig +var c *config.Config +var qsService *qs.Service + +type testConfig struct { + Zone string `json:"zone" yaml:"zone"` + BucketName string `json:"bucket_name" yaml:"bucket_name"` + + RetryWaitTime int `json:"retry_wait_time" yaml:"retry_wait_time"` + MaxRetries int `json:"max_retries" yaml:"max_retries"` +} + +func loadTestConfig() { + if tc == nil { + configYAML, err := ioutil.ReadFile("./test_config.yaml") + checkErrorForExit(err) + + tc = &testConfig{} + err = yaml.Unmarshal(configYAML, tc) + checkErrorForExit(err) + } +} + +func loadConfig() { + if c == nil { + c, err = config.NewDefault() + checkErrorForExit(err) + + err = c.LoadConfigFromFilePath("./config.yaml") + checkErrorForExit(err) + } +} + +func initQingStorService() { + if qsService == nil { + qsService, err = qs.Init(c) + checkErrorForExit(err) + } +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/object.go b/vendor/github.com/yunify/qingstor-sdk-go/test/object.go new file mode 100644 index 000000000..f7c84338e --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/object.go @@ -0,0 +1,263 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package main + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "fmt" + "io" + "net/http" + "os" + "os/exec" + + "github.com/DATA-DOG/godog" + + "github.com/yunify/qingstor-sdk-go/request" + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// ObjectFeatureContext provides feature context for object. +func ObjectFeatureContext(s *godog.Suite) { + s.Step(`^put object with key "(.{1,})"$`, putObjectWithKey) + s.Step(`^put object status code is (\d+)$`, putObjectStatusCodeIs) + + s.Step(`^copy object with key "(.{1,})"$`, copyObjectWithKey) + s.Step(`^copy object status code is (\d+)$`, copyObjectStatusCodeIs) + + s.Step(`^move object with key "(.{1,})"$`, moveObjectWithKey) + s.Step(`^move object status code is (\d+)$`, moveObjectStatusCodeIs) + + s.Step(`^get object with key "(.{1,})"$`, getObjectWithKey) + s.Step(`^get object status code is (\d+)$`, getObjectStatusCodeIs) + s.Step(`^get object content length is (\d+)$`, getObjectContentLengthIs) + + s.Step(`^get object "(.{1,})" with content type "(.{1,})"$`, getObjectWithContentType) + s.Step(`^get object content type is "(.{1,})"$`, getObjectContentTypeIs) + + s.Step(`^get object "(.{1,})" with query signature$`, getObjectWithQuerySignature) + s.Step(`^get object with query signature content length is (\d+)$`, getObjectWithQuerySignatureContentLengthIs) + + s.Step(`^head object with key "(.{1,})"$`, headObjectWithKey) + s.Step(`^head object status code is (\d+)$`, headObjectStatusCodeIs) + + s.Step(`^options object "(.{1,})" with method "([^"]*)" and origin "([^"]*)"$`, optionsObjectWithMethodAndOrigin) + s.Step(`^options object status code is (\d+)$`, optionsObjectStatusCodeIs) + + s.Step(`^delete object with key "(.{1,})"$`, deleteObjectWithKey) + s.Step(`^delete object status code is (\d+)$`, deleteObjectStatusCodeIs) + s.Step(`^delete the move object with key "(.{1,})"$`, deleteTheMoveObjectWithKey) + s.Step(`^delete the move object status code is (\d+)$`, deleteTheMoveObjectStatusCodeIs) +} + +// -------------------------------------------------------------------------- +var putObjectOutput *qs.PutObjectOutput + +func putObjectWithKey(objectKey string) error { + _, err = exec.Command("dd", "if=/dev/zero", "of=/tmp/sdk_bin", "bs=1024", "count=1").Output() + if err != nil { + return err + } + defer os.Remove("/tmp/sdk_bin") + + file, err := os.Open("/tmp/sdk_bin") + if err != nil { + return err + } + defer file.Close() + + hash := md5.New() + _, err = io.Copy(hash, file) + hashInBytes := hash.Sum(nil)[:16] + md5String := hex.EncodeToString(hashInBytes) + + //file.Seek(0, io.SeekStart) + file.Seek(0, 0) + putObjectOutput, err = bucket.PutObject(objectKey, &qs.PutObjectInput{ + ContentType: qs.String("text/plain"), + ContentMD5: qs.String(md5String), + Body: file, + }) + return err +} + +func putObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(putObjectOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- +var copyObjectOutput *qs.PutObjectOutput + +func copyObjectWithKey(objectKey string) error { + copyObjectKey := fmt.Sprintf(`%s_copy`, objectKey) + copyObjectOutput, err = bucket.PutObject(copyObjectKey, &qs.PutObjectInput{ + XQSCopySource: qs.String(fmt.Sprintf(`/%s/%s`, tc.BucketName, objectKey)), + }) + return err +} + +func copyObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(copyObjectOutput.StatusCode), statusCode) +} + +// 
-------------------------------------------------------------------------- +var moveObjectOutput *qs.PutObjectOutput + +func moveObjectWithKey(objectKey string) error { + copyObjectKey := fmt.Sprintf(`%s_copy`, objectKey) + moveObjectKey := fmt.Sprintf(`%s_move`, objectKey) + moveObjectOutput, err = bucket.PutObject(moveObjectKey, &qs.PutObjectInput{ + XQSMoveSource: qs.String(fmt.Sprintf(`/%s/%s`, tc.BucketName, copyObjectKey)), + }) + return err +} + +func moveObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(moveObjectOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var getObjectOutput *qs.GetObjectOutput + +func getObjectWithKey(objectKey string) error { + getObjectOutput, err = bucket.GetObject(objectKey, nil) + return err +} + +func getObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(getObjectOutput.StatusCode), statusCode) +} + +func getObjectContentLengthIs(length int) error { + buffer := &bytes.Buffer{} + buffer.ReadFrom(getObjectOutput.Body) + getObjectOutput.Body.Close() + return checkEqual(len(buffer.Bytes())*1024, length) +} + +// -------------------------------------------------------------------------- + +var getObjectWithContentTypeRequest *request.Request + +func getObjectWithContentType(objectKey, contentType string) error { + getObjectWithContentTypeRequest, _, err = bucket.GetObjectRequest( + objectKey, + &qs.GetObjectInput{ + ResponseContentType: qs.String(contentType), + }, + ) + if err != nil { + return err + } + err = getObjectWithContentTypeRequest.Send() + if err != nil { + return err + } + return nil +} + +func getObjectContentTypeIs(contentType string) error { + return checkEqual(getObjectWithContentTypeRequest.HTTPResponse.Header.Get("Content-Type"), contentType) +} + +// -------------------------------------------------------------------------- + +var getObjectWithQuerySignatureURL string + +func getObjectWithQuerySignature(objectKey string) error { + r, _, err := bucket.GetObjectRequest(objectKey, nil) + if err != nil { + return err + } + + err = r.SignQuery(10) + if err != nil { + return err + } + + getObjectWithQuerySignatureURL = r.HTTPRequest.URL.String() + return nil +} + +func getObjectWithQuerySignatureContentLengthIs(length int) error { + out, err := http.Get(getObjectWithQuerySignatureURL) + if err != nil { + return err + } + buffer := &bytes.Buffer{} + buffer.ReadFrom(out.Body) + out.Body.Close() + + return checkEqual(len(buffer.Bytes())*1024, length) +} + +// -------------------------------------------------------------------------- + +var headObjectOutput *qs.HeadObjectOutput + +func headObjectWithKey(objectKey string) error { + headObjectOutput, err = bucket.HeadObject(objectKey, nil) + return err +} + +func headObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(headObjectOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var optionsObjectOutput *qs.OptionsObjectOutput + +func optionsObjectWithMethodAndOrigin(objectKey, method, origin string) error { + optionsObjectOutput, err = bucket.OptionsObject( + objectKey, + &qs.OptionsObjectInput{ + AccessControlRequestMethod: qs.String(method), + Origin: qs.String(origin), + }, + ) + return err +} + +func optionsObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(optionsObjectOutput.StatusCode), statusCode) +} + +// 
-------------------------------------------------------------------------- + +var deleteObjectOutput *qs.DeleteObjectOutput +var deleteTheMoveObjectOutput *qs.DeleteObjectOutput + +func deleteObjectWithKey(objectKey string) error { + deleteObjectOutput, err = bucket.DeleteObject(objectKey) + return err +} + +func deleteObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteObjectOutput.StatusCode), statusCode) +} + +func deleteTheMoveObjectWithKey(objectKey string) error { + deleteTheMoveObjectOutput, err = bucket.DeleteObject(fmt.Sprintf(`%s_move`, objectKey)) + return err +} + +func deleteTheMoveObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteTheMoveObjectOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/object_multipart.go b/vendor/github.com/yunify/qingstor-sdk-go/test/object_multipart.go new file mode 100644 index 000000000..fd504ce7f --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/object_multipart.go @@ -0,0 +1,238 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package main + +import ( + "os" + "os/exec" + + "github.com/DATA-DOG/godog" + + "fmt" + "github.com/yunify/qingstor-sdk-go/request/errors" + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// ObjectMultipartFeatureContext provides feature context for object multipart. 
+func ObjectMultipartFeatureContext(s *godog.Suite) { + s.Step(`^initiate multipart upload with key "(.{1,})"$`, initiateMultipartUploadWithKey) + s.Step(`^initiate multipart upload status code is (\d+)$`, initiateMultipartUploadStatusCodeIs) + + s.Step(`^upload the first part with key "(.{1,})"$`, uploadTheFirstPartWithKey) + s.Step(`^upload the first part status code is (\d+)$`, uploadTheFirstPartStatusCodeIs) + s.Step(`^upload the second part with key "(.{1,})"$`, uploadTheSecondPartWithKey) + s.Step(`^upload the second part status code is (\d+)$`, uploadTheSecondPartStatusCodeIs) + s.Step(`^upload the third part with key "(.{1,})"$`, uploadTheThirdPartWithKey) + s.Step(`^upload the third part status code is (\d+)$`, uploadTheThirdPartStatusCodeIs) + + s.Step(`^list multipart with key "(.{1,})"$`, listMultipartWithKey) + s.Step(`^list multipart status code is (\d+)$`, listMultipartStatusCodeIs) + s.Step(`^list multipart object parts count is (\d+)$`, listMultipartObjectPartsCountIs) + + s.Step(`^complete multipart upload with key "(.{1,})"$`, completeMultipartUploadWithKey) + s.Step(`^complete multipart upload status code is (\d+)$`, completeMultipartUploadStatusCodeIs) + + s.Step(`^abort multipart upload with key "(.{1,})"$`, abortMultipartUploadWithKey) + s.Step(`^abort multipart upload status code is (\d+)$`, abortMultipartUploadStatusCodeIs) + + s.Step(`^delete the multipart object with key "(.{1,})"$`, deleteTheMultipartObjectWithKey) + s.Step(`^delete the multipart object status code is (\d+)$`, deleteTheMultipartObjectStatusCodeIs) +} + +// -------------------------------------------------------------------------- + +var initiateMultipartUploadOutput *qs.InitiateMultipartUploadOutput + +func initiateMultipartUploadWithKey(objectKey string) error { + initiateMultipartUploadOutput, err = bucket.InitiateMultipartUpload( + objectKey, + &qs.InitiateMultipartUploadInput{ + ContentType: qs.String("text/plain"), + }, + ) + return err +} + +func initiateMultipartUploadStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(initiateMultipartUploadOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var uploadTheFirstPartOutput *qs.UploadMultipartOutput +var uploadTheSecondPartOutput *qs.UploadMultipartOutput +var uploadTheThirdPartOutput *qs.UploadMultipartOutput + +func uploadTheFirstPartWithKey(objectKey string) error { + _, err = exec.Command("dd", "if=/dev/zero", "of=/tmp/sdk_bin_part_0", "bs=1048576", "count=5").Output() + if err != nil { + return err + } + defer os.Remove("/tmp/sdk_bin_part_0") + + file, err := os.Open("/tmp/sdk_bin_part_0") + if err != nil { + return err + } + defer file.Close() + + uploadTheFirstPartOutput, err = bucket.UploadMultipart( + objectKey, + &qs.UploadMultipartInput{ + UploadID: initiateMultipartUploadOutput.UploadID, + PartNumber: qs.Int(0), + Body: file, + }, + ) + return err +} + +func uploadTheFirstPartStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(uploadTheFirstPartOutput.StatusCode), statusCode) +} + +func uploadTheSecondPartWithKey(objectKey string) error { + _, err = exec.Command("dd", "if=/dev/zero", "of=/tmp/sdk_bin_part_1", "bs=1048576", "count=4").Output() + if err != nil { + return err + } + defer os.Remove("/tmp/sdk_bin_part_1") + + file, err := os.Open("/tmp/sdk_bin_part_1") + if err != nil { + return err + } + defer file.Close() + + uploadTheSecondPartOutput, err = bucket.UploadMultipart( + objectKey, + &qs.UploadMultipartInput{ + UploadID: 
initiateMultipartUploadOutput.UploadID, + PartNumber: qs.Int(1), + Body: file, + }, + ) + return err +} + +func uploadTheSecondPartStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(uploadTheSecondPartOutput.StatusCode), statusCode) +} + +func uploadTheThirdPartWithKey(objectKey string) error { + _, err = exec.Command("dd", "if=/dev/zero", "of=/tmp/sdk_bin_part_2", "bs=1048576", "count=3").Output() + if err != nil { + return err + } + defer os.Remove("/tmp/sdk_bin_part_2") + + file, err := os.Open("/tmp/sdk_bin_part_2") + if err != nil { + return err + } + defer file.Close() + + uploadTheThirdPartOutput, err = bucket.UploadMultipart( + objectKey, + &qs.UploadMultipartInput{ + UploadID: initiateMultipartUploadOutput.UploadID, + PartNumber: qs.Int(2), + Body: file, + }, + ) + return err +} + +func uploadTheThirdPartStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(uploadTheThirdPartOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +var listMultipartOutput *qs.ListMultipartOutput + +func listMultipartWithKey(objectKey string) error { + listMultipartOutput, err = bucket.ListMultipart( + objectKey, + &qs.ListMultipartInput{ + UploadID: initiateMultipartUploadOutput.UploadID, + }, + ) + return err +} + +func listMultipartStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(listMultipartOutput.StatusCode), statusCode) +} + +func listMultipartObjectPartsCountIs(count int) error { + return checkEqual(len(listMultipartOutput.ObjectParts), count) +} + +// -------------------------------------------------------------------------- + +var completeMultipartUploadOutput *qs.CompleteMultipartUploadOutput + +func completeMultipartUploadWithKey(objectKey string) error { + completeMultipartUploadOutput, err = bucket.CompleteMultipartUpload( + objectKey, + &qs.CompleteMultipartUploadInput{ + UploadID: initiateMultipartUploadOutput.UploadID, + ETag: qs.String(`"4072783b8efb99a9e5817067d68f61c6"`), + ObjectParts: listMultipartOutput.ObjectParts, + }, + ) + return err +} + +func completeMultipartUploadStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(completeMultipartUploadOutput.StatusCode), statusCode) +} + +// -------------------------------------------------------------------------- + +func abortMultipartUploadWithKey(objectKey string) error { + _, err = bucket.AbortMultipartUpload( + objectKey, + &qs.AbortMultipartUploadInput{ + UploadID: initiateMultipartUploadOutput.UploadID, + }, + ) + return nil +} + +func abortMultipartUploadStatusCodeIs(statusCode int) error { + switch e := err.(type) { + case *errors.QingStorError: + return checkEqual(e.StatusCode, statusCode) + } + + return fmt.Errorf("abort multipart upload should get \"%d\"", statusCode) +} + +// -------------------------------------------------------------------------- + +var deleteTheMultipartObjectOutput *qs.DeleteObjectOutput + +func deleteTheMultipartObjectWithKey(objectKey string) error { + deleteTheMultipartObjectOutput, err = bucket.DeleteObject(objectKey) + return err +} + +func deleteTheMultipartObjectStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(deleteTheMultipartObjectOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/service.go b/vendor/github.com/yunify/qingstor-sdk-go/test/service.go new file mode 100644 index 000000000..f9f4ace67 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/service.go @@ -0,0 +1,60 @@ +// 
+------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package main + +import ( + "errors" + + "github.com/DATA-DOG/godog" + + qs "github.com/yunify/qingstor-sdk-go/service" +) + +// ServiceFeatureContext provides feature context for service. +func ServiceFeatureContext(s *godog.Suite) { + s.Step(`^initialize QingStor service$`, initializeQingStorService) + s.Step(`^the QingStor service is initialized$`, theQingStorServiceIsInitialized) + + s.Step(`^list buckets$`, listBuckets) + s.Step(`^list buckets status code is (\d+)$`, listBucketsStatusCodeIs) +} + +// -------------------------------------------------------------------------- + +func initializeQingStorService() error { + return nil +} + +func theQingStorServiceIsInitialized() error { + if qsService == nil { + return errors.New("QingStor service is not initialized") + } + return nil +} + +// -------------------------------------------------------------------------- + +var listBucketsOutput *qs.ListBucketsOutput + +func listBuckets() error { + listBucketsOutput, err = qsService.ListBuckets(nil) + return err +} + +func listBucketsStatusCodeIs(statusCode int) error { + return checkEqual(qs.IntValue(listBucketsOutput.StatusCode), statusCode) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/test_config.yaml.example b/vendor/github.com/yunify/qingstor-sdk-go/test/test_config.yaml.example new file mode 100644 index 000000000..7d497d385 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/test_config.yaml.example @@ -0,0 +1,7 @@ +# Test configurations + +zone: pek3a +bucket_name: access-key-id + +retry_wait_time: 3 # seconds +max_retries: 60 diff --git a/vendor/github.com/yunify/qingstor-sdk-go/test/utils.go b/vendor/github.com/yunify/qingstor-sdk-go/test/utils.go new file mode 100644 index 000000000..9c8b53c57 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/test/utils.go @@ -0,0 +1,54 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. 
+// +------------------------------------------------------------------------- + +package main + +import ( + "fmt" + "os" + "reflect" +) + +func checkError(err error) { + if err != nil { + fmt.Println(err.Error()) + } +} + +func checkErrorForExit(err error, code ...int) { + if err != nil { + exitCode := 1 + if len(code) > 0 { + exitCode = code[0] + } + fmt.Println(err.Error(), exitCode) + os.Exit(exitCode) + } +} + +func checkEqual(value, shouldBe interface{}) error { + if value == nil || shouldBe == nil { + if value == shouldBe { + return nil + } + } else { + if reflect.DeepEqual(value, shouldBe) { + return nil + } + } + + return fmt.Errorf("Value \"%v\" should be \"%v\"", value, shouldBe) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/utils/escape.go b/vendor/github.com/yunify/qingstor-sdk-go/utils/escape.go new file mode 100644 index 000000000..e1988db90 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/utils/escape.go @@ -0,0 +1,23 @@ +package utils + +import ( + "net/url" + "strings" +) + +// URLQueryEscape escapes the original string. +func URLQueryEscape(origin string) string { + escaped := url.QueryEscape(origin) + escaped = strings.Replace(escaped, "%2F", "/", -1) + escaped = strings.Replace(escaped, "%3D", "=", -1) + escaped = strings.Replace(escaped, "+", "%20", -1) + return escaped +} + +// URLQueryUnescape unescapes the escaped string. +func URLQueryUnescape(escaped string) (string, error) { + escaped = strings.Replace(escaped, "/", "%2F", -1) + escaped = strings.Replace(escaped, "=", "%3D", -1) + escaped = strings.Replace(escaped, "%20", " ", -1) + return url.QueryUnescape(escaped) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/utils/escape_test.go b/vendor/github.com/yunify/qingstor-sdk-go/utils/escape_test.go new file mode 100644 index 000000000..2f37250d0 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/utils/escape_test.go @@ -0,0 +1,30 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestURLQueryEscape(t *testing.T) { + assert.Equal(t, "/", URLQueryEscape("/")) + assert.Equal(t, "%20", URLQueryEscape(" ")) + assert.Equal(t, + "%21%40%23%24%25%5E%26%2A%28%29_%2B%7B%7D%7C", + URLQueryEscape("!@#$%^&*()_+{}|"), + ) +} + +func TestURLQueryUnescape(t *testing.T) { + x, err := URLQueryUnescape("/") + assert.Nil(t, err) + assert.Equal(t, "/", x) + + x, err = URLQueryUnescape("%20") + assert.Nil(t, err) + assert.Equal(t, " ", x) + + x, err = URLQueryUnescape("%21%40%23%24%25%5E%26%2A%28%29_%2B%7B%7D%7C") + assert.Nil(t, err) + assert.Equal(t, "!@#$%^&*()_+{}|", x) +} diff --git a/vendor/github.com/yunify/qingstor-sdk-go/version.go b/vendor/github.com/yunify/qingstor-sdk-go/version.go new file mode 100644 index 000000000..9fb5ca468 --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/version.go @@ -0,0 +1,23 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. 
+// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +// Package sdk is the official QingStor SDK for the Go programming language. +// +// https://github.com/yunify/qingstor-sdk-go +package sdk + +// Version number. +const Version = "2.2.5" diff --git a/vendor/github.com/yunify/qingstor-sdk-go/version_test.go b/vendor/github.com/yunify/qingstor-sdk-go/version_test.go new file mode 100644 index 000000000..fedc2bd5a --- /dev/null +++ b/vendor/github.com/yunify/qingstor-sdk-go/version_test.go @@ -0,0 +1,28 @@ +// +------------------------------------------------------------------------- +// | Copyright (C) 2016 Yunify, Inc. +// +------------------------------------------------------------------------- +// | Licensed under the Apache License, Version 2.0 (the "License"); +// | you may not use this work except in compliance with the License. +// | You may obtain a copy of the License in the LICENSE file, or at: +// | +// | http://www.apache.org/licenses/LICENSE-2.0 +// | +// | Unless required by applicable law or agreed to in writing, software +// | distributed under the License is distributed on an "AS IS" BASIS, +// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// | See the License for the specific language governing permissions and +// | limitations under the License. +// +------------------------------------------------------------------------- + +package sdk + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestVersion(t *testing.T) { + assert.Equal(t, "string", reflect.ValueOf(Version).Type().String()) +}
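For orientation, below is a minimal usage sketch of the vendored SDK, pieced together from calls that already appear in this patch (config.NewDefault, LoadConfigFromFilePath, qs.Init, bucket.PutObject, qs.String, qs.IntValue). It is not part of the vendored code: the Service.Bucket accessor, the "pek3a" zone, and the bucket/object names are illustrative assumptions.

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/yunify/qingstor-sdk-go/config"
        qs "github.com/yunify/qingstor-sdk-go/service"
    )

    func main() {
        // Load the default configuration and overlay a config file in the
        // format of test/config.yaml.example (access key, host, protocol, ...).
        c, err := config.NewDefault()
        if err != nil {
            log.Fatal(err)
        }
        if err := c.LoadConfigFromFilePath("./config.yaml"); err != nil {
            log.Fatal(err)
        }

        // Initialize the top-level QingStor service, as test/main.go does.
        qsService, err := qs.Init(c)
        if err != nil {
            log.Fatal(err)
        }

        // Service.Bucket is assumed here; zone and bucket name are placeholders.
        bucket, err := qsService.Bucket("example-bucket", "pek3a")
        if err != nil {
            log.Fatal(err)
        }

        // Upload a small text object, mirroring the PutObject call in test/object.go.
        putOutput, err := bucket.PutObject("hello.txt", &qs.PutObjectInput{
            ContentType: qs.String("text/plain"),
            Body:        strings.NewReader("hello, QingStor"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("put status:", qs.IntValue(putOutput.StatusCode))
    }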