dep ensure -update (#1001)

* dep ensure -update

Run "dep ensure -update` to update all dependencies.

No code changes; just the dependencies.

* dep prune

* add new vendored packages
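
For readers unfamiliar with dep, this is a minimal sketch of the workflow the bullets above describe; it assumes the `dep` tool is installed and the commands are run from the repository root, and the staging step at the end is illustrative rather than recorded in the commit.

```sh
# Sketch of the dependency update workflow used for this kind of commit.
dep ensure -update   # resolve and pull the newest allowed revisions into vendor/
dep prune            # drop unused packages from vendor/ (separate command in older dep releases)
git add Gopkg.lock vendor/   # stage the regenerated lock file and vendor tree
```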
Miek Gieben 2017-08-28 17:49:28 +02:00 committed by Yong Tang
parent 558f4bea41
commit 7e63bdbee8
407 changed files with 32999 additions and 24546 deletions

Gopkg.lock generated

@ -3,9 +3,9 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","internal"]
revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c"
version = "v0.7.0"
packages = ["compute/metadata"]
revision = "0f0b8420cb699ac4ce059c63bac263f4301fe95b"
version = "v0.12.0"
[[projects]]
name = "github.com/PuerkitoBio/purell"
@ -17,7 +17,7 @@
branch = "master"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
revision = "bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5"
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
name = "github.com/Shopify/sarama"
@ -40,20 +40,26 @@
[[projects]]
name = "github.com/blang/semver"
packages = ["."]
revision = "b38d23b8782a487059e8fc8773e9a5b228a77cb6"
version = "v3.5.0"
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
name = "github.com/coreos/etcd"
packages = ["client","pkg/pathutil","pkg/types"]
revision = "d267ca9c184e953554257d0acdd1dc9c47d38229"
version = "v3.1.8"
packages = ["client","pkg/pathutil","pkg/srv","pkg/types","version"]
revision = "9d43462d174c664f5edf313dec0de31e1ef4ed47"
version = "v3.2.6"
[[projects]]
branch = "master"
name = "github.com/coreos/go-oidc"
packages = ["http","jose","key","oauth2","oidc"]
revision = "c797a55f1c1001ec3169f1d0fbb4c5523563bec6"
revision = "a4973d9a4225417aecf5d450a9522f00c1f7130f"
[[projects]]
name = "github.com/coreos/go-semver"
packages = ["semver"]
revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
version = "v0.2.0"
[[projects]]
name = "github.com/coreos/pkg"
@ -76,8 +82,8 @@
[[projects]]
name = "github.com/docker/distribution"
packages = ["digest","reference"]
revision = "a25b9ef0c9fe242ac04bb20d3a028442b7d266b6"
version = "v2.6.1"
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
version = "v2.6.2"
[[projects]]
name = "github.com/eapache/go-resiliency"
@ -143,13 +149,13 @@
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "e51c28f07047ad90caff03f6450908720d337e0c"
revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "e43299b4afa7bc7f22e5e82e3d48607230e4c177"
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
[[projects]]
name = "github.com/gogo/protobuf"
@ -167,7 +173,7 @@
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","ptypes/any"]
revision = "5a0f697c9ed9d68fef0116532c6e05cfeae00e55"
revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
[[projects]]
branch = "master"
@ -179,13 +185,7 @@
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c"
[[projects]]
branch = "master"
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
branch = "master"
@ -227,7 +227,7 @@
branch = "master"
name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"]
revision = "44c0351a5bc860bcb2608d54aa03ea686c4e7b25"
revision = "3b3bbef4e2d9a6f24d7ee73a894afbc064f0e057"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
@ -244,14 +244,14 @@
[[projects]]
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [".","_thrift/gen-go/scribe","_thrift/gen-go/zipkincore","flag","types","wire"]
revision = "6022d4d3ed39632fad842942bda1813a9b4f63c8"
version = "v0.2.3"
revision = "d88c90182f3fb514be54a1c7adc11a04d41c7da9"
version = "v0.2.4"
[[projects]]
name = "github.com/pborman/uuid"
packages = ["."]
revision = "a97ce2ca70fa5a848076093f05e639a89ca34d06"
version = "v1.0"
revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
version = "v1.1"
[[projects]]
name = "github.com/pierrec/lz4"
@ -281,13 +281,13 @@
branch = "master"
name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
revision = "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207"
revision = "61f87aac8082fa8c3c5655c7608d7478d46ac2ad"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [".","xfs"]
revision = "65c1f6f8f0fc1e2185eb9863a3bc751496404259"
revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
[[projects]]
branch = "master"
@ -296,40 +296,40 @@
revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "708a42d246822952f38190a8d8c4e6b16a0e600c"
revision = "8c0409fcbb70099c748d71f714529204975f6c3f"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
revision = "81e90905daefcd6fd217b62423c0908922eadb30"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "f047394b6d14284165300fd82dad67edb3a4d7f6"
revision = "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "b90f89a1e7a9c1f6b918820b3daa7f08488c8594"
packages = ["unix","windows"]
revision = "2d6f6f883a06fc0d5f4b14a81e4c28705ea64c15"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm","width"]
revision = "4ee4af566555f5fbe026368b75596286a312663a"
revision = "ac87088df8ef557f1e32cd00ed0b6fbc3f7ddafb"
[[projects]]
name = "google.golang.org/appengine"
@ -341,13 +341,13 @@
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "aa2eb687b4d3e17154372564ad8d6bf11c3cf21f"
revision = "ee236bd376b077c7a89f260c026c4735b195e459"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "d2e1b51f33ff8c5e4a15560ff049d200e83726c5"
version = "v1.3.0"
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "b3ddf786825de56a4178401b7e174ee332173b66"
version = "v1.5.2"
[[projects]]
name = "gopkg.in/inf.v0"
@ -359,7 +359,7 @@
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b"
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[[projects]]
name = "k8s.io/client-go"


@ -7,9 +7,9 @@ go:
install:
- go get -v cloud.google.com/go/...
script:
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
go test -race -v cloud.google.com/go/...
./run-tests.sh $TRAVIS_COMMIT
env:
matrix:
# The GCLOUD_TESTS_API_KEY environment variable.


@ -4,6 +4,12 @@
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
1. You will need to ensure that your `GOBIN` directory (by default
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
1. If you would like, you may want to set up aliases for git-codereview,
such that `git codereview change` becomes `git change`. See the
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details.
1. Should you run into issues with the git-codereview tool, please note
that all error messages will assume that you have set up these
aliases.
1. Get the cloud package by running `go get -d cloud.google.com/go`.
1. If you have already checked out the source, make sure that the remote git
origin is https://code.googlesource.com/gocloud:

vendor/cloud.google.com/go/MIGRATION.md generated vendored Normal file

@ -0,0 +1,54 @@
# Code Changes
## v0.10.0
- pubsub: Replace
```
sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
```
with
```
sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
})
```
- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
Given an initialized `*trace.Client` named `tc`, instead of
```
s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
```
write
```
s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
```
- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
Instead of
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
```
write
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
```
- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
interceptor as a dial option as shown below when initializing Cloud package
clients:
```
c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
if err != nil {
...
}
```

vendor/cloud.google.com/go/README.md generated vendored

@ -1,22 +1,21 @@
# Google Cloud for Go
# Google Cloud Client Libraries for Go
[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
``` go
import "cloud.google.com/go"
```
Go packages for Google Cloud Platform services.
To install the packages on your system,
```
$ go get -u cloud.google.com/go/...
```
**NOTE:** These packages are under development, and may occasionally make
backwards-incompatible changes.
**NOTE:** Some of these packages are under development, and may occasionally
make backwards-incompatible changes.
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
@ -34,136 +33,61 @@ backwards-incompatible changes.
## News
_February 14, 2017_
_August 22, 2017_
Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
*v0.12.0*
Note that although the Spanner service is beta, the Go client library is alpha.
- pubsub: Subscription.Receive now uses streaming pull.
_December 12, 2016_
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
Beta release of BigQuery, DataStore, Logging and Storage. See the
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
- errors: renamed errorreporting. The errors package will be removed shortly.
Also, BigQuery now supports structs. Read a row directly into a struct with
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
for details.
- datastore: improved retry behavior.
_December 5, 2016_
- bigquery: support updates to dataset metadata, with etags.
More changes to BigQuery:
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
* The `ValueList` type was removed. It is no longer necessary. Instead of
```go
var v ValueList
... it.Next(&v) ..
```
use
- bigquery: generate all job IDs on the client.
```go
var v []Value
... it.Next(&v) ...
```
- storage: support bucket lifecycle configurations.
* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
`ValueList` would append to the slice. Now each call resets the size to zero first.
* Schema inference will infer the SQL type BYTES for a struct field of
type []byte. Previously it inferred STRING.
_July 31, 2017_
* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
inference. BigQuery's integer type is INT64, and those types may hold values
that are not correctly represented in a 64-bit signed integer.
*v0.11.0*
* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
package.
- Clients for spanner, pubsub and video are now in beta.
_November 17, 2016_
- New client for DLP.
Change to BigQuery: values from INTEGER columns will now be returned as int64,
not int. This will avoid errors arising from large values on 32-bit systems.
- spanner: performance and testing improvements.
_November 8, 2016_
- storage: requester-pays buckets are supported.
New datastore feature: datastore now encodes your nested Go structs as Entity values,
instead of a flattened list of the embedded struct's fields.
This means that you may now have twice-nested slices, eg.
```go
type State struct {
Cities []struct{
Populations []int
}
}
```
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
more details.
- pubsub: bug fixes and other minor improvements
_November 8, 2016_
_June 17, 2017_
Breaking changes to datastore: contexts no longer hold namespaces; instead you
must set a key's namespace explicitly. Also, key functions have been changed
and renamed.
* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
```go
q := datastore.NewQuery("Kind").Namespace("ns")
```
*v0.10.0*
* All the fields of Key are exported. That means you can construct any Key with a struct literal:
```go
k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
```
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed.
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
```go
NewIncompleteKey(ctx, kind, parent)
```
with
```go
IncompleteKey(kind, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.
* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
```go
NewKey(ctx, kind, name, 0, parent)
NewKey(ctx, kind, "", id, parent)
```
with
```go
NameKey(kind, name, parent)
IDKey(kind, id, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
- translation: now stable.
* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
- trace: several changes to the surface. See the link below.
* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
more details.
_October 27, 2016_
Breaking change to bigquery: `NewGCSReference` is now a function,
not a method on `Client`.
New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
loading data into a table from a file or any `io.Reader`.
_October 21, 2016_
Breaking change to pubsub: removed `pubsub.Done`.
Use `iterator.Done` instead, where `iterator` is the package
`google.golang.org/api/iterator`.
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
@ -171,17 +95,22 @@ Use `iterator.Done` instead, where `iterator` is the package
## Supported APIs
Google API | Status | Package
-------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
---------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language] | alpha | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | alpha | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref]
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | beta | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | beta | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
> **Alpha status**: the API is still being actively developed. As a
@ -215,6 +144,7 @@ By default, each API will use [Google Application Default Credentials][default-c
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.
[snip]:# (auth)
```go
client, err := storage.NewClient(ctx)
```
@ -225,6 +155,7 @@ pass
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
to the `NewClient` function of the desired package. For example:
[snip]:# (auth-JSON)
```go
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
```
@ -234,6 +165,7 @@ You can exert more control over authorization by using the
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:
[snip]:# (auth-ts)
```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
@ -251,6 +183,7 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
First create a `datastore.Client` to use throughout your application:
[snip]:# (datastore-1)
```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
@ -260,6 +193,7 @@ if err != nil {
Then use that client to interact with the API:
[snip]:# (datastore-2)
```go
type Post struct {
Title string
@ -267,8 +201,8 @@ type Post struct {
PublishedAt time.Time
}
keys := []*datastore.Key{
datastore.NewKey(ctx, "Post", "post1", 0, nil),
datastore.NewKey(ctx, "Post", "post2", 0, nil),
datastore.NameKey("Post", "post1", nil),
datastore.NameKey("Post", "post2", nil),
}
posts := []*Post{
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
@ -290,6 +224,7 @@ if _, err := client.PutMulti(ctx, keys, posts); err != nil {
First create a `storage.Client` to use throughout your application:
[snip]:# (storage-1)
```go
client, err := storage.NewClient(ctx)
if err != nil {
@ -297,6 +232,7 @@ if err != nil {
}
```
[snip]:# (storage-2)
```go
// Read the object1 from bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
@ -321,6 +257,7 @@ if err != nil {
First create a `pubsub.Client` to use throughout your application:
[snip]:# (pubsub-1)
```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
@ -330,36 +267,30 @@ if err != nil {
Then use the client to publish and subscribe:
[snip]:# (pubsub-2)
```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
res := topic.Publish(ctx, &pubsub.Message{
Data: []byte("hello world"),
})
// The publish happens asynchronously.
// Later, you can get the result from res:
...
msgID, err := res.Get(ctx)
if err != nil {
log.Fatal(err)
}
// Create an iterator to pull messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx)
// Use a callback to receive messages via subscription1.
sub := client.Subscription("subscription1")
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
fmt.Println(m.Data)
m.Ack() // Acknowledge that we've consumed the message.
})
if err != nil {
log.Println(err)
}
defer it.Stop()
// Consume N messages from the iterator.
for i := 0; i < N; i++ {
msg, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatalf("Failed to retrieve message: %v", err)
}
fmt.Printf("Message %d: %s\n", i, msg.Data)
msg.Done(true) // Acknowledge that we've consumed the message.
}
```
## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
@ -372,13 +303,16 @@ for i := 0; i < N; i++ {
### Example Usage
First create a `bigquery.Client` to use throughout your application:
[snip]:# (bq-1)
```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
// TODO: Handle error.
}
```
Then use that client to interact with the API:
[snip]:# (bq-2)
```go
// Construct a query.
q := c.Query(`
@ -418,7 +352,7 @@ for {
### Example Usage
First create a `logging.Client` to use throughout your application:
[snip]:# (logging-1)
```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
@ -426,13 +360,17 @@ if err != nil {
// TODO: Handle error.
}
```
Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.
[snip]:# (logging-2)
```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```
Close your client before your program exits, to flush any buffered log entries.
[snip]:# (logging-3)
```go
err = client.Close()
if err != nil {
@ -440,7 +378,6 @@ if err != nil {
}
```
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)
- [About Cloud Spanner][cloud-spanner]
@ -451,6 +388,7 @@ if err != nil {
First create a `spanner.Client` to use throughout your application:
[snip]:# (spanner-1)
```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
@ -458,9 +396,10 @@ if err != nil {
}
```
[snip]:# (spanner-2)
```go
// Simple Reads And Writes
_, err := client.Apply(ctx, []*spanner.Mutation{
_, err = client.Apply(ctx, []*spanner.Mutation{
spanner.Insert("Users",
[]string{"name", "email"},
[]interface{}{"alice", "a@example.com"})})
@ -512,17 +451,32 @@ for more information.
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision
[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1
[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs
[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace
[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials


@ -34,8 +34,6 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"cloud.google.com/go/internal"
)
const (
@ -48,6 +46,8 @@ const (
// This is variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
@ -65,25 +65,21 @@ var (
var (
metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
},
}
subscribeClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
},
}
)
@ -132,6 +128,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error)
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
@ -202,7 +199,9 @@ func testOnGCE() bool {
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
if err != nil {
resc <- false
return


@ -1,64 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides support for the cloud packages.
//
// Users should not import this package directly.
package internal
import (
"fmt"
"net/http"
)
const userAgent = "gcloud-golang/0.1"
// Transport is an http.RoundTripper that appends Google Cloud client's
// user-agent to the original request's user-agent header.
type Transport struct {
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
// Do User-Agent some other way.
// Base is the actual http.RoundTripper
// requests will use. It must not be nil.
Base http.RoundTripper
}
// RoundTrip appends a user-agent to the existing user-agent
// header and delegates the request to the base http.RoundTripper.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req)
ua := req.Header.Get("User-Agent")
if ua == "" {
ua = userAgent
} else {
ua = fmt.Sprintf("%s %s", ua, userAgent)
}
req.Header.Set("User-Agent", ua)
return t.Base.RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}


@ -1,55 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
)
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with ctx.Err().
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
return retry(ctx, bo, f, gax.Sleep)
}
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
sleep func(context.Context, time.Duration) error) error {
var lastErr error
for {
stop, err := f()
if stop {
return err
}
// Remember the last "real" error from f.
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
lastErr = err
}
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
}
return cerr
}
}
}


@ -1,64 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"errors"
"testing"
"time"
"golang.org/x/net/context"
gax "github.com/googleapis/gax-go"
)
func TestRetry(t *testing.T) {
ctx := context.Background()
// Without a context deadline, retry will run until the function
// says not to retry any more.
n := 0
endRetry := errors.New("end retry")
err := retry(ctx, gax.Backoff{},
func() (bool, error) {
n++
if n < 10 {
return false, nil
}
return true, endRetry
},
func(context.Context, time.Duration) error { return nil })
if got, want := err, endRetry; got != want {
t.Errorf("got %v, want %v", err, endRetry)
}
if n != 10 {
t.Errorf("n: got %d, want %d", n, 10)
}
// If the context has a deadline, sleep will return an error
// and end the function.
n = 0
err = retry(ctx, gax.Backoff{},
func() (bool, error) { return false, nil },
func(context.Context, time.Duration) error {
n++
if n < 10 {
return nil
}
return context.DeadlineExceeded
})
if err == nil {
t.Error("got nil, want error")
}
}

Binary file not shown.


@ -1,3 +1,142 @@
_March 17, 2017_
Breaking Pubsub changes.
* Publish is now asynchronous
([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
* Message.Done replaced with Message.Ack and Message.Nack.
_February 14, 2017_
Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
Note that although the Spanner service is beta, the Go client library is alpha.
_December 12, 2016_
Beta release of BigQuery, DataStore, Logging and Storage. See the
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
Also, BigQuery now supports structs. Read a row directly into a struct with
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
for details.
_December 5, 2016_
More changes to BigQuery:
* The `ValueList` type was removed. It is no longer necessary. Instead of
```go
var v ValueList
... it.Next(&v) ..
```
use
```go
var v []Value
... it.Next(&v) ...
```
* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
`ValueList` would append to the slice. Now each call resets the size to zero first.
* Schema inference will infer the SQL type BYTES for a struct field of
type []byte. Previously it inferred STRING.
* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
inference. BigQuery's integer type is INT64, and those types may hold values
that are not correctly represented in a 64-bit signed integer.
* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
package.
_November 17, 2016_
Change to BigQuery: values from INTEGER columns will now be returned as int64,
not int. This will avoid errors arising from large values on 32-bit systems.
_November 8, 2016_
New datastore feature: datastore now encodes your nested Go structs as Entity values,
instead of a flattened list of the embedded struct's fields.
This means that you may now have twice-nested slices, eg.
```go
type State struct {
Cities []struct{
Populations []int
}
}
```
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
more details.
_November 8, 2016_
Breaking changes to datastore: contexts no longer hold namespaces; instead you
must set a key's namespace explicitly. Also, key functions have been changed
and renamed.
* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
```go
q := datastore.NewQuery("Kind").Namespace("ns")
```
* All the fields of Key are exported. That means you can construct any Key with a struct literal:
```go
k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
```
* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed.
* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
```go
NewIncompleteKey(ctx, kind, parent)
```
with
```go
IncompleteKey(kind, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
```go
NewKey(ctx, kind, name, 0, parent)
NewKey(ctx, kind, "", id, parent)
```
with
```go
NameKey(kind, name, parent)
IDKey(kind, id, parent)
```
and if you do use namespaces, make sure you set the namespace on the returned key.
* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
more details.
_October 27, 2016_
Breaking change to bigquery: `NewGCSReference` is now a function,
not a method on `Client`.
New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
loading data into a table from a file or any `io.Reader`.
_October 21, 2016_
Breaking change to pubsub: removed `pubsub.Done`.
Use `iterator.Done` instead, where `iterator` is the package
`google.golang.org/api/iterator`.
_October 19, 2016_
Breaking changes to cloud.google.com/go/bigquery:

vendor/cloud.google.com/go/run-tests.sh generated vendored Executable file

@ -0,0 +1,86 @@
#!/bin/bash
# Selectively run tests for this repo, based on what has changed
# in a commit. Runs short tests for the whole repo, and full tests
# for changed directories.
set -e
prefix=cloud.google.com/go
dryrun=false
if [[ $1 == "-n" ]]; then
dryrun=true
shift
fi
if [[ $1 == "" ]]; then
echo >&2 "usage: $0 [-n] COMMIT"
exit 1
fi
# Files or directories that cause all tests to run if modified.
declare -A run_all
run_all=([.travis.yml]=1 [run-tests.sh]=1)
function run {
if $dryrun; then
echo $*
else
(set -x; $*)
fi
}
# Find all the packages that have changed in this commit.
declare -A changed_packages
for f in $(git diff-tree --no-commit-id --name-only -r $1); do
if [[ ${run_all[$f]} == 1 ]]; then
# This change requires a full test. Do it and exit.
run go test -race -v $prefix/...
exit
fi
# Map, e.g., "spanner/client.go" to "$prefix/spanner".
d=$(dirname $f)
if [[ $d == "." ]]; then
pkg=$prefix
else
pkg=$prefix/$d
fi
changed_packages[$pkg]=1
done
echo "changed packages: ${!changed_packages[*]}"
# Reports whether its argument, a package name, depends (recursively)
# on a changed package.
function depends_on_changed_package {
# According to go list, a package does not depend on itself, so
# we test that separately.
if [[ ${changed_packages[$1]} == 1 ]]; then
return 0
fi
for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do
if [[ ${changed_packages[$dep]} == 1 ]]; then
return 0
fi
done
return 1
}
# Collect the packages into two separate lists. (It is faster go test a list of
# packages than to individually go test each one.)
shorts=
fulls=
for pkg in $(go list $prefix/...); do # for each package in the repo
if depends_on_changed_package $pkg; then # if it depends on a changed package
fulls="$fulls $pkg" # run the full test
else # otherwise
shorts="$shorts $pkg" # run the short test
fi
done
run go test -race -v -short $shorts
run go test -race -v $fulls


@ -1,4 +1,4 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.

vendor/github.com/blang/semver/.travis.yml generated vendored Normal file

@ -0,0 +1,21 @@
language: go
matrix:
include:
- go: 1.4.3
- go: 1.5.4
- go: 1.6.3
- go: 1.7
- go: tip
allow_failures:
- go: tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
-repotoken $COVERALLS_TOKEN
- echo "Build examples" ; cd examples && go build
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
env:
global:
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=


@ -1,4 +1,4 @@
semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
@ -41,6 +41,7 @@ Features
- Compare Helper Methods
- InPlace manipulation
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
- Wildcards `>=1.x`, `<=2.5.x`
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
@ -59,6 +60,8 @@ A condition is composed of an operator and a version. The supported operators ar
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
Note that spaces between the operator and the version will be gracefully tolerated.
A `Range` can link multiple `Ranges` separated by space:
Ranges can be linked by logical AND:


@ -12,6 +12,6 @@
"license": "MIT",
"name": "semver",
"releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
"version": "3.4.0"
"version": "3.5.1"
}


@ -12,3 +12,4 @@
*.test
tools/functional-tester/docker/bin
hack/tls-setup/certs
.idea


@ -4,7 +4,8 @@ go_import_path: github.com/coreos/etcd
sudo: false
go:
- 1.7.5
- 1.8.3
- tip
notifications:
on_success: never
@ -13,6 +14,8 @@ notifications:
env:
matrix:
- TARGET=amd64
- TARGET=darwin-amd64
- TARGET=windows-amd64
- TARGET=arm64
- TARGET=arm
- TARGET=386
@ -23,6 +26,10 @@ matrix:
allow_failures:
- go: tip
exclude:
- go: tip
env: TARGET=darwin-amd64
- go: tip
env: TARGET=windows-amd64
- go: tip
env: TARGET=arm
- go: tip
@ -32,6 +39,24 @@ matrix:
- go: tip
env: TARGET=ppc64le
addons:
apt:
sources:
- debian-sid
packages:
- libpcap-dev
- libaspell-dev
- libhunspell-dev
- shellcheck
before_install:
- go get -v -u github.com/chzchzchz/goword
- go get -v -u github.com/coreos/license-bill-of-materials
- go get -v -u honnef.co/go/tools/cmd/gosimple
- go get -v -u honnef.co/go/tools/cmd/unused
- go get -v -u honnef.co/go/tools/cmd/staticcheck
- ./scripts/install-marker.sh amd64
# disable godep restore override
install:
- pushd cmd/etcd && go get -t -v ./... && popd
@ -42,6 +67,12 @@ script:
amd64)
GOARCH=amd64 ./test
;;
darwin-amd64)
GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build
;;
windows-amd64)
GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build
;;
386)
GOARCH=386 PASSES="build unit" ./test
;;


@ -1,6 +1,6 @@
# How to contribute
etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers and other resources to make getting your contribution into etcd easier.
etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers, and other resources to help get contributions into etcd.
# Email and chat
@ -14,24 +14,20 @@ etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
## Reporting bugs and creating issues
Reporting bugs is one of the best ways to contribute. However, a good bug report
has some very specific qualities, so please read over our short document on
[reporting bugs](https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md)
before you submit your bug report. This document might contain links known
issues, another good reason to take a look there, before reporting your bug.
Reporting bugs is one of the best ways to contribute. However, a good bug report has some very specific qualities, so please read over our short document on [reporting bugs](https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md) before submitting a bug report. This document might contain links to known issues, another good reason to take a look there before reporting a bug.
## Contribution flow
This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where you want to base your work. This is usually master.
- Create a topic branch from where to base the contribution. This is usually master.
- Make commits of logical units.
- Make sure your commit messages are in the proper format (see below).
- Push your changes to a topic branch in your fork of the repository.
- Make sure commit messages are in the proper format (see below).
- Push changes in a topic branch to a personal fork of the repository.
- Submit a pull request to coreos/etcd.
- Your PR must receive a LGTM from two maintainers found in the MAINTAINERS file.
- The PR must receive a LGTM from two maintainers found in the MAINTAINERS file.
Thanks for your contributions!
Thanks for contributing!
### Code style
@ -48,8 +44,7 @@ the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
this uses tmux to setup a test cluster that can easily be killed and started for debugging.
Fixes #38
```
@ -64,7 +59,4 @@ The format can be described more formally as follows:
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.
The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This allows the message to be easier to read on GitHub as well as in various git tools.

vendor/github.com/coreos/etcd/Dockerfile-release.arm64 generated vendored Normal file

@ -0,0 +1,11 @@
FROM aarch64/ubuntu:16.04
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
ADD var/etcd /var/etcd
ADD var/lib/etcd /var/lib/etcd
EXPOSE 2379 2380
# Define default command.
CMD ["/usr/local/bin/etcd"]


@ -0,0 +1,11 @@
FROM ppc64le/ubuntu:16.04
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
ADD var/etcd /var/etcd
ADD var/lib/etcd /var/lib/etcd
EXPOSE 2379 2380
# Define default command.
CMD ["/usr/local/bin/etcd"]


@ -1,5 +1,6 @@
Anthony Romano <anthony.romano@coreos.com> (@heyitsanthony) pkg:*
Brandon Philips <brandon.philips@coreos.com> (@philips) pkg:*
Fanmin Shi <fanmin.shi@coreos.com> (@fanminshi) pkg:*
Gyu-Ho Lee <gyu_ho.lee@coreos.com> (@gyuho) pkg:*
Xiang Li <xiang.li@coreos.com> (@xiang90) pkg:*

vendor/github.com/coreos/etcd/NEWS generated vendored

@ -1,3 +1,28 @@
etcd v3.1.6 (2017-04-19)
- remove auth check in Status API
- fill in Auth API response header
etcd v3.1.5 (2017-03-27)
- add '/etc/nsswitch.conf' file to alpine-based Docker image
- fix raft memory leak issue
- fix Windows file path issues
etcd v3.1.4 (2017-03-22)
etcd v3.1.3 (2017-03-10)
- use machine default host when advertise URLs are default
values(localhost:2379,2380) AND if listen URL is 0.0.0.0
- fix 'etcd gateway' schema handling in DNS discovery
- fix sd_notify behaviors in gateway, grpc-proxy
etcd v3.1.2 (2017-02-24)
- use IPv4 default host, by default (when IPv4 and IPv6 are available)
- fix 'etcd gateway' with multiple endpoints
etcd v3.1.1 (2017-02-17)
etcd v2.3.8 (2017-02-17)
etcd v3.1.0 (2017-01-20)
- faster linearizable reads (implements Raft read-index)
- automatic leadership transfer when leader steps down


@ -11,7 +11,7 @@
![etcd Logo](logos/etcd-horizontal-color.png)
etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being:
etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:
* *Simple*: well-defined, user-facing API (gRPC)
* *Secure*: automatic TLS with optional client cert authentication
@ -39,12 +39,9 @@ See [etcdctl][etcdctl] for a simple command line client.
The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
For those wanting to try the very latest version, you can [build the latest version of etcd][dl-build] from the `master` branch.
You will first need [*Go*](https://golang.org/) installed on your machine (version 1.7+ is required).
All development occurs on `master`, including new features and bug fixes.
Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.8+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
[rkt]: https://github.com/coreos/rkt/releases/
[rkt]: https://github.com/rkt/rkt/releases/
[github-release]: https://github.com/coreos/etcd/releases/
[branch-management]: ./Documentation/branch_management.md
[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
@ -96,7 +93,7 @@ Every cluster member and proxy accepts key value reads and key value writes.
### Running etcd on Kubernetes
If you want to run etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
To run an etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
### Next steps
@ -106,7 +103,7 @@ Now it's time to dig into the full etcd API and other guides.
- Explore the full gRPC [API][api].
- Set up a [multi-machine cluster][clustering].
- Learn the [config format, env variables and flags][configuration].
- Find [language bindings and tools][libraries-and-tools].
- Find [language bindings and tools][integrations].
- Use TLS to [secure an etcd cluster][security].
- [Tune etcd][tuning].
@ -114,7 +111,7 @@ Now it's time to dig into the full etcd API and other guides.
[api]: ./Documentation/dev-guide/api_reference_v3.md
[clustering]: ./Documentation/op-guide/clustering.md
[configuration]: ./Documentation/op-guide/configuration.md
[libraries-and-tools]: ./Documentation/libraries-and-tools.md
[integrations]: ./Documentation/integrations.md
[security]: ./Documentation/op-guide/security.md
[tuning]: ./Documentation/tuning.md
@ -131,7 +128,7 @@ See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the co
## Reporting bugs
See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issue you may encounter.
See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issues.
### License


@ -6,7 +6,7 @@ This document defines a high level roadmap for etcd development.
The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
etcd 3.1 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
etcd 3.2 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like.
### etcd 3.2 (2017-May)
- Stable scalable proxy

vendor/github.com/coreos/etcd/bill-of-materials.json generated vendored Normal file

@ -0,0 +1,379 @@
[
{
"project": "bitbucket.org/ww/goautoneg",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 1
}
]
},
{
"project": "github.com/beorn7/perks/quantile",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{
"project": "github.com/bgentry/speakeasy",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9441624365482234
}
]
},
{
"project": "github.com/boltdb/bolt",
"licenses": [
{
"type": "MIT License",
"confidence": 1
}
]
},
{
"project": "github.com/cockroachdb/cmux",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/coreos/etcd",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/coreos/go-semver/semver",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/coreos/go-systemd",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 0.9966703662597114
}
]
},
{
"project": "github.com/coreos/pkg",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/cpuguy83/go-md2man/md2man",
"licenses": [
{
"type": "MIT License",
"confidence": 1
}
]
},
{
"project": "github.com/dgrijalva/jwt-go",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{
"project": "github.com/dustin/go-humanize",
"licenses": [
{
"type": "MIT License",
"confidence": 0.96875
}
]
},
{
"project": "github.com/ghodss/yaml",
"licenses": [
{
"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 1
}
]
},
{
"project": "github.com/gogo/protobuf/proto",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9090909090909091
}
]
},
{
"project": "github.com/golang/groupcache/lru",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 0.9966703662597114
}
]
},
{
"project": "github.com/golang/protobuf",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.92
}
]
},
{
"project": "github.com/google/btree",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/grpc-ecosystem/go-grpc-prometheus",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/grpc-ecosystem/grpc-gateway",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.979253112033195
}
]
},
{
"project": "github.com/inconshreveable/mousetrap",
"licenses": [
{
"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 1
},
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/jonboulle/clockwork",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/mattn/go-runewidth",
"licenses": [
{
"type": "MIT License",
"confidence": 1
}
]
},
{
"project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/olekukonko/tablewriter",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{
"project": "github.com/prometheus/client_golang/prometheus",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/prometheus/client_model/go",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/prometheus/common",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/prometheus/procfs",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 1
}
]
},
{
"project": "github.com/russross/blackfriday",
"licenses": [
{
"type": "BSD 2-clause \"Simplified\" License",
"confidence": 0.9626168224299065
}
]
},
{
"project": "github.com/spf13/cobra",
"licenses": [
{
"type": "Apache License 2.0",
"confidence": 0.9573241061130334
}
]
},
{
"project": "github.com/spf13/pflag",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9663865546218487
}
]
},
{
"project": "github.com/ugorji/go/codec",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9946524064171123
}
]
},
{
"project": "github.com/urfave/cli",
"licenses": [
{
"type": "MIT License",
"confidence": 1
}
]
},
{
"project": "github.com/xiang90/probing",
"licenses": [
{
"type": "MIT License",
"confidence": 1
}
]
},
{
"project": "golang.org/x/crypto",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9663865546218487
}
]
},
{
"project": "golang.org/x/net",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9663865546218487
}
]
},
{
"project": "golang.org/x/text",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9663865546218487
}
]
},
{
"project": "golang.org/x/time/rate",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9663865546218487
}
]
},
{
"project": "google.golang.org/grpc",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.979253112033195
}
]
},
{
"project": "gopkg.in/cheggaaa/pb.v1",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",
"confidence": 0.9916666666666667
}
]
},
{
"project": "gopkg.in/yaml.v2",
"licenses": [
{
"type": "The Unlicense",
"confidence": 0.35294117647058826
},
{
"type": "MIT License",
"confidence": 0.8975609756097561
}
]
}
]


@ -0,0 +1,26 @@
[
{
"project": "bitbucket.org/ww/goautoneg",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License"
}
]
},
{
"project": "github.com/ghodss/yaml",
"licenses": [
{
"type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License"
}
]
},
{
"project": "github.com/inconshreveable/mousetrap",
"licenses": [
{
"type": "Apache License 2.0"
}
]
}
]

23 vendor/github.com/coreos/etcd/build generated vendored

@ -3,9 +3,7 @@
# set some environment variables
ORG_PATH="github.com/coreos"
REPO_PATH="${ORG_PATH}/etcd"
export GO15VENDOREXPERIMENT="1"
eval $(go env)
GIT_SHA=`git rev-parse --short HEAD || echo "GitNotFound"`
if [ ! -z "$FAILPOINTS" ]; then
GIT_SHA="$GIT_SHA"-FAILPOINTS
@ -17,11 +15,7 @@ GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=$
# enable/disable failpoints
toggle_failpoints() {
FAILPKGS="etcdserver/ mvcc/backend/"
mode="disable"
if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
if [ ! -z "$1" ]; then mode="$1"; fi
mode="$1"
if which gofail >/dev/null 2>&1; then
gofail "$mode" $FAILPKGS
elif [ "$mode" != "disable" ]; then
@ -30,19 +24,26 @@ toggle_failpoints() {
fi
}
toggle_failpoints_default() {
mode="disable"
if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
toggle_failpoints "$mode"
}
etcd_build() {
out="bin"
if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
toggle_failpoints
toggle_failpoints_default
# Static compilation is useful when etcd is run in a container
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return
CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return
}
etcd_setup_gopath() {
CDIR=$(cd `dirname "$0"` && pwd)
d=$(dirname "$0")
CDIR=$(cd "$d" && pwd)
cd "$CDIR"
etcdGOPATH=${CDIR}/gopath
etcdGOPATH="${CDIR}/gopath"
# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
if [ -n "$GOPATH" ]; then
GOPATH=":$GOPATH"
@ -53,7 +54,7 @@ etcd_setup_gopath() {
ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src
}
toggle_failpoints
toggle_failpoints_default
# only build when called directly, not sourced
if echo "$0" | grep "build$" >/dev/null; then


@ -15,6 +15,7 @@
package client
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@ -27,6 +28,8 @@ import (
"sync"
"time"
"github.com/coreos/etcd/version"
"golang.org/x/net/context"
)
@ -201,6 +204,9 @@ type Client interface {
// returned
SetEndpoints(eps []string) error
// GetVersion retrieves the current etcd server and cluster version
GetVersion(ctx context.Context) (*version.Versions, error)
httpClient
}
@ -477,6 +483,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration
}
}
func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
act := &getAction{Prefix: "/version"}
resp, body, err := c.Do(ctx, act)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK:
if len(body) == 0 {
return nil, ErrEmptyBody
}
var vresp version.Versions
if err := json.Unmarshal(body, &vresp); err != nil {
return nil, ErrInvalidJSON
}
return &vresp, nil
default:
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return nil, ErrInvalidJSON
}
return nil, etcdErr
}
}
type roundTripResponse struct {
resp *http.Response
err error
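
For context, a minimal sketch (not part of this commit) of how the new GetVersion method could be called from application code; the endpoint and timeout values are placeholders.

```go
// Sketch only: assumes the etcd v2 client API shown above (client.New,
// Client.GetVersion) and a placeholder endpoint.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: 3 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// GetVersion issues a GET against /version and decodes the JSON body
	// into version.Versions, as implemented in the method above.
	v, err := c.GetVersion(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("server=%s cluster=%s\n", v.Server, v.Cluster)
}
```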


@ -28,6 +28,7 @@ import (
"time"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/version"
"golang.org/x/net/context"
)
@ -860,6 +861,34 @@ func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
}
}
func TestHTTPClusterClientGetVersion(t *testing.T) {
body := []byte(`{"etcdserver":"2.3.2","etcdcluster":"2.3.0"}`)
cf := newStaticHTTPClientFactory([]staticHTTPResponse{
{
resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Length": []string{"44"}}},
body: body,
},
})
hc := &httpClusterClient{
clientFactory: cf,
rand: rand.New(rand.NewSource(0)),
}
err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
if err != nil {
t.Fatalf("unexpected error during setup: %#v", err)
}
actual, err := hc.GetVersion(context.Background())
if err != nil {
t.Errorf("non-nil error: %#v", err)
}
expected := version.Versions{Server: "2.3.2", Cluster: "2.3.0"}
if !reflect.DeepEqual(&expected, actual) {
t.Errorf("incorrect Response: want=%#v got=%#v", expected, actual)
}
}
// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when
// it gets the exactly same member list as before.
func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {


@ -14,8 +14,27 @@
package client
import (
"github.com/coreos/etcd/pkg/srv"
)
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
func (d *srvDiscover) Discover(domain string) ([]string, error) {
srvs, err := srv.GetClient("etcd-client", domain)
if err != nil {
return nil, err
}
return srvs.Endpoints, nil
}
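
A minimal sketch (not part of this commit) of the rewritten discoverer from the caller's side; the domain is a placeholder and assumes _etcd-client._tcp / _etcd-client-ssl._tcp SRV records exist.

```go
// Sketch only: uses the Discoverer interface and NewSRVDiscover shown above.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	d := client.NewSRVDiscover()
	// Discover now delegates to pkg/srv.GetClient and returns one endpoint
	// URL per SRV record (https for etcd-client-ssl, http for etcd-client).
	eps, err := d.Discover("example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(eps)
}
```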


@ -1,65 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"net"
"net/url"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
)
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
// Discover looks up the etcd servers for the domain.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
var urls []*url.URL
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
return nil
}
errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return endpoints, nil
}


@ -1,102 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"errors"
"net"
"reflect"
"testing"
)
func TestSRVDiscover(t *testing.T) {
defer func() { lookupSRV = net.LookupSRV }()
tests := []struct {
withSSL []*net.SRV
withoutSSL []*net.SRV
expected []string
}{
{
[]*net.SRV{},
[]*net.SRV{},
[]string{},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{
{Target: "10.0.0.1", Port: 7001},
},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{
{Target: "10.0.0.1", Port: 7001},
},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
},
{
[]*net.SRV{
{Target: "a.example.com", Port: 2480},
{Target: "b.example.com", Port: 2480},
{Target: "c.example.com", Port: 2480},
},
[]*net.SRV{},
[]string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"},
},
}
for i, tt := range tests {
lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) {
if service == "etcd-client-ssl" {
return "", tt.withSSL, nil
}
if service == "etcd-client" {
return "", tt.withoutSSL, nil
}
return "", nil, errors.New("Unknown service in mock")
}
d := NewSRVDiscover()
endpoints, err := d.Discover("example.com")
if err != nil {
t.Fatalf("%d: err: %#v", i, err)
}
if !reflect.DeepEqual(endpoints, tt.expected) {
t.Errorf("#%d: endpoints = %v, want %v", i, endpoints, tt.expected)
}
}
}


@ -69,6 +69,12 @@ initial-cluster-state: 'new'
# Reject reconfiguration requests that would cause quorum loss.
strict-reconfig-check: false
# Accept etcd V2 client requests
enable-v2: true
# Enable runtime profiling data via HTTP server
enable-pprof: true
# Valid values include 'on', 'readonly', 'off'
proxy: 'off'


@ -1,18 +1,18 @@
hash: ca3c895fa60c9ca9f53408202fb7643705f9960212d342967ed0da8e93606cc4
updated: 2017-01-18T10:26:48.990115455-08:00
hash: cee1f2629857e9c2384ad89ff6014db09498c9af53771e5144ad3a4b510ff00e
updated: 2017-05-30T10:29:08.22609283-07:00
imports:
- name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
subpackages:
- quantile
- name: github.com/bgentry/speakeasy
version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0
version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd
- name: github.com/boltdb/bolt
version: 583e8937c61f1af6513608ccc75c97b6abdf4ff9
- name: github.com/cockroachdb/cmux
version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
- name: github.com/coreos/go-semver
version: 568e959cd89871e61434c1143528d9162da89ef2
version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
subpackages:
- semver
- name: github.com/coreos/go-systemd
@ -27,17 +27,23 @@ imports:
- capnslog
- dlopen
- name: github.com/cpuguy83/go-md2man
version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
version: bcc0a711c5e6bbe72c7cb13d81c7109b45267fd2
subpackages:
- md2man
- name: github.com/dgrijalva/jwt-go
version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
- name: github.com/dustin/go-humanize
version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
- name: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
- name: github.com/gogo/protobuf
version: 909568be09de550ed094403c2bf8a261b5bb730a
subpackages:
- proto
- name: github.com/golang/groupcache
version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
subpackages:
- lru
- name: github.com/golang/protobuf
version: 4bd1920723d7b7c925de087aa32e2187708897f7
subpackages:
@ -48,7 +54,7 @@ imports:
- name: github.com/grpc-ecosystem/go-grpc-prometheus
version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
- name: github.com/grpc-ecosystem/grpc-gateway
version: 84398b94e188ee336f307779b57b3aa91af7063c
version: 18d159699f2e83fc5bb9ef2f79465ca3f3122676
subpackages:
- runtime
- runtime/internal
@ -57,46 +63,42 @@ imports:
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/jonboulle/clockwork
version: 2eee05ed794112d45db504eb05aa693efd2b8b09
- name: github.com/karlseguin/ccache
version: a2d62155777b39595c825ed3824279e642a5db3c
- name: github.com/kr/pty
version: f7ee69f31298ecbe5d2b349c711e2547a617d398
version: 2c10821df3c3cf905230d078702dfbe9404c9b23
- name: github.com/mattn/go-runewidth
version: 737072b4e32b7a5018b4a7125da8d12de90e8045
version: 9e777a8366cce605130a531d2cd6363d07ad7317
subpackages:
- runewidth.go
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
- pbutil
- name: github.com/olekukonko/tablewriter
version: cca8bbc0798408af109aaaa239cbd2634846b340
version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
- name: github.com/prometheus/client_golang
version: c5b7fccd204277076155f10851dad72b76a49317
subpackages:
- prometheus
- name: github.com/prometheus/client_model
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
version: 6f3806018612930941127f2a7c6c453ba2c527d2
subpackages:
- go
- name: github.com/prometheus/common
version: 195bde7883f7c39ea62b0d92ab7359b5327065cb
version: 49fee292b27bfff7f354ee0f64e1bc4850462edf
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
version: a1dba9ce8baed984a2495b658c82687f8157b98f
subpackages:
- xfs
- name: github.com/russross/blackfriday
version: 5f33e7b7878355cd2b7e6b8eefc48a5472c69f70
- name: github.com/shurcooL/sanitized_anchor_name
version: 1dba4b3954bc059efc3991ec364f9f9a35f597d2
version: 0ba0f2b6ed7c475a92e4df8641825cb7a11d1fa3
- name: github.com/spf13/cobra
version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
- name: github.com/spf13/pflag
version: 08b1a584251b5b62f458943640fc8ebd4d50aaa5
- name: github.com/stretchr/testify
version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506
subpackages:
- assert
- name: github.com/ugorji/go
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
subpackages:
@ -111,7 +113,7 @@ imports:
- bcrypt
- blowfish
- name: golang.org/x/net
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
subpackages:
- context
- http2
@ -121,34 +123,36 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: 478fcf54317e52ab69f40bb4c7a1520288d7f7ea
version: e48874b42435b4347fc52bdee0424a52abc974d7
subpackages:
- unix
- name: golang.org/x/text
version: 4ee4af566555f5fbe026368b75596286a312663a
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: golang.org/x/time
version: a4bde12657593d5e90d0533a3e4fd95e635124cb
version: c06e80d9300e4443158a03817b8a8cb37d230320
subpackages:
- rate
- name: google.golang.org/grpc
version: 777daa17ff9b5daef1cfdf915088a2ada3332bf0
version: 8050b9cbc271307e5a716a9d782803d09b0d6f2d
subpackages:
- codes
- credentials
- grpclog
- internal
- keepalive
- metadata
- naming
- peer
- stats
- tap
- transport
- name: gopkg.in/cheggaaa/pb.v1
version: 226d21d43a305fac52b3a104ef83e721b15275e0
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
testImports:
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
testImports: []


@ -1,13 +1,13 @@
package: github.com/coreos/etcd
import:
- package: github.com/bgentry/speakeasy
version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0
version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd
- package: github.com/boltdb/bolt
version: v1.3.0
- package: github.com/cockroachdb/cmux
version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
- package: github.com/coreos/go-semver
version: 568e959cd89871e61434c1143528d9162da89ef2
version: v0.2.0
subpackages:
- semver
- package: github.com/coreos/go-systemd
@ -23,11 +23,15 @@ import:
- package: github.com/dustin/go-humanize
version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
- package: github.com/ghodss/yaml
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
version: v1.0.0
- package: github.com/gogo/protobuf
version: v0.3
subpackages:
- proto
- package: github.com/golang/groupcache
version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
subpackages:
- lru
- package: github.com/golang/protobuf
version: 4bd1920723d7b7c925de087aa32e2187708897f7
subpackages:
@ -36,7 +40,7 @@ import:
- package: github.com/google/btree
version: 925471ac9e2131377a91e1595defec898166fe49
- package: github.com/grpc-ecosystem/grpc-gateway
version: 84398b94e188ee336f307779b57b3aa91af7063c
version: v1.2.0
subpackages:
- runtime
- runtime/internal
@ -44,13 +48,21 @@ import:
- package: github.com/jonboulle/clockwork
version: v0.1.0
- package: github.com/kr/pty
version: f7ee69f31298ecbe5d2b349c711e2547a617d398
version: v1.0.0
- package: github.com/olekukonko/tablewriter
version: cca8bbc0798408af109aaaa239cbd2634846b340
version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
- package: github.com/mattn/go-runewidth
version: v0.0.2
subpackages:
- runewidth.go
- package: github.com/prometheus/client_golang
version: v0.8.0
subpackages:
- prometheus
- package: github.com/prometheus/common
version: 49fee292b27bfff7f354ee0f64e1bc4850462edf
- package: github.com/prometheus/procfs
version: a1dba9ce8baed984a2495b658c82687f8157b98f
- package: github.com/spf13/cobra
version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
- package: github.com/spf13/pflag
@ -71,19 +83,21 @@ import:
- bcrypt
- blowfish
- package: golang.org/x/net
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
version: c8c74377599bd978aee1cf3b9b63a8634051cec2
subpackages:
- context
- http2
- http2/hpack
- internal/timeseries
- trace
- package: golang.org/x/sys
version: e48874b42435b4347fc52bdee0424a52abc974d7
- package: golang.org/x/time
version: a4bde12657593d5e90d0533a3e4fd95e635124cb
version: c06e80d9300e4443158a03817b8a8cb37d230320
subpackages:
- rate
- package: google.golang.org/grpc
version: v1.0.4
version: v1.2.1
subpackages:
- codes
- credentials
@ -96,10 +110,6 @@ import:
- package: gopkg.in/cheggaaa/pb.v1
version: v1.0.2
- package: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- package: github.com/stretchr/testify
version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506
subpackages:
- assert
- package: github.com/karlseguin/ccache
version: v2.0.2
version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
- package: github.com/dgrijalva/jwt-go
version: v3.0.0

35 vendor/github.com/coreos/etcd/main_test.go generated vendored Normal file

@ -0,0 +1,35 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"os/signal"
"strings"
"syscall"
"testing"
)
func TestMain(t *testing.T) {
// don't launch etcd server when invoked via go test
if strings.HasSuffix(os.Args[0], "etcd.test") {
return
}
notifier := make(chan os.Signal, 1)
signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
go main()
<-notifier
}

140 vendor/github.com/coreos/etcd/pkg/srv/srv.go generated vendored Normal file

@ -0,0 +1,140 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package srv looks up DNS SRV records.
package srv
import (
"fmt"
"net"
"net/url"
"strings"
"github.com/coreos/etcd/pkg/types"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
resolveTCPAddr = net.ResolveTCPAddr
)
// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
for _, url := range apurls {
tcpAddr, err := resolveTCPAddr("tcp", url.Host)
if err != nil {
return nil, err
}
tcp2ap[tcpAddr.String()] = url
}
stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
return err
}
for _, srv := range addrs {
port := fmt.Sprintf("%d", srv.Port)
host := net.JoinHostPort(srv.Target, port)
tcpAddr, terr := resolveTCPAddr("tcp", host)
if terr != nil {
err = terr
continue
}
n := ""
url, ok := tcp2ap[tcpAddr.String()]
if ok {
n = name
}
if n == "" {
n = fmt.Sprintf("%d", tempName)
tempName++
}
// SRV records have a trailing dot but URL shouldn't.
shortHost := strings.TrimSuffix(srv.Target, ".")
urlHost := net.JoinHostPort(shortHost, port)
stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
if ok && url.Scheme != scheme {
err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
}
}
if len(stringParts) == 0 {
return err
}
return nil
}
failCount := 0
err := updateNodeMap(service+"-ssl", "https")
srvErr := make([]string, 2)
if err != nil {
srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
failCount++
}
err = updateNodeMap(service, "http")
if err != nil {
srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
failCount++
}
if failCount == 2 {
return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
}
return stringParts, nil
}
type SRVClients struct {
Endpoints []string
SRVs []*net.SRV
}
// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string) (*SRVClients, error) {
var urls []*url.URL
var srvs []*net.SRV
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
srvs = append(srvs, addrs...)
return nil
}
errHTTPS := updateURLs(service+"-ssl", "https")
errHTTP := updateURLs(service, "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
}
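
A minimal sketch (not part of this commit) of how the relocated SRV helpers could be used for peer bootstrap; the service name, member name, domain, and peer URL below are placeholder values.

```go
// Sketch only: assumes matching _etcd-server._tcp / _etcd-server-ssl._tcp
// SRV records for the placeholder domain.
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/srv"
	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Advertised peer URLs of the local member; GetCluster matches them
	// against resolved SRV targets so the local member keeps its own name.
	apurls, err := types.NewURLs([]string{"https://10.0.0.1:2380"})
	if err != nil {
		log.Fatal(err)
	}

	// Returns "name=scheme://host:port" entries suitable for building an
	// --initial-cluster flag, as exercised by TestSRVGetCluster below.
	cluster, err := srv.GetCluster("etcd-server", "infra0", "example.com", apurls)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cluster)
}
```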

200 vendor/github.com/coreos/etcd/pkg/srv/srv_test.go generated vendored Normal file

@ -0,0 +1,200 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package srv
import (
"errors"
"net"
"reflect"
"strings"
"testing"
"github.com/coreos/etcd/pkg/testutil"
)
func TestSRVGetCluster(t *testing.T) {
defer func() {
lookupSRV = net.LookupSRV
resolveTCPAddr = net.ResolveTCPAddr
}()
name := "dnsClusterTest"
dns := map[string]string{
"1.example.com.:2480": "10.0.0.1:2480",
"2.example.com.:2480": "10.0.0.2:2480",
"3.example.com.:2480": "10.0.0.3:2480",
"4.example.com.:2380": "10.0.0.3:2380",
}
srvAll := []*net.SRV{
{Target: "1.example.com.", Port: 2480},
{Target: "2.example.com.", Port: 2480},
{Target: "3.example.com.", Port: 2480},
}
tests := []struct {
withSSL []*net.SRV
withoutSSL []*net.SRV
urls []string
expected string
}{
{
[]*net.SRV{},
[]*net.SRV{},
nil,
"",
},
{
srvAll,
[]*net.SRV{},
nil,
"0=https://1.example.com:2480,1=https://2.example.com:2480,2=https://3.example.com:2480",
},
{
srvAll,
[]*net.SRV{{Target: "4.example.com.", Port: 2380}},
nil,
"0=https://1.example.com:2480,1=https://2.example.com:2480,2=https://3.example.com:2480,3=http://4.example.com:2380",
},
{
srvAll,
[]*net.SRV{{Target: "4.example.com.", Port: 2380}},
[]string{"https://10.0.0.1:2480"},
"dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480,2=http://4.example.com:2380",
},
// matching local member with resolved addr and return unresolved hostnames
{
srvAll,
nil,
[]string{"https://10.0.0.1:2480"},
"dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480",
},
// invalid
}
resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) {
if strings.Contains(addr, "10.0.0.") {
// accept IP addresses when resolving apurls
return net.ResolveTCPAddr(network, addr)
}
if dns[addr] == "" {
return nil, errors.New("missing dns record")
}
return net.ResolveTCPAddr(network, dns[addr])
}
for i, tt := range tests {
lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) {
if service == "etcd-server-ssl" {
return "", tt.withSSL, nil
}
if service == "etcd-server" {
return "", tt.withoutSSL, nil
}
return "", nil, errors.New("Unknown service in mock")
}
urls := testutil.MustNewURLs(t, tt.urls)
str, err := GetCluster("etcd-server", name, "example.com", urls)
if err != nil {
t.Fatalf("%d: err: %#v", i, err)
}
if strings.Join(str, ",") != tt.expected {
t.Errorf("#%d: cluster = %s, want %s", i, str, tt.expected)
}
}
}
func TestSRVDiscover(t *testing.T) {
defer func() { lookupSRV = net.LookupSRV }()
tests := []struct {
withSSL []*net.SRV
withoutSSL []*net.SRV
expected []string
}{
{
[]*net.SRV{},
[]*net.SRV{},
[]string{},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{
{Target: "10.0.0.1", Port: 7001},
},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
},
{
[]*net.SRV{
{Target: "10.0.0.1", Port: 2480},
{Target: "10.0.0.2", Port: 2480},
{Target: "10.0.0.3", Port: 2480},
},
[]*net.SRV{
{Target: "10.0.0.1", Port: 7001},
},
[]string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
},
{
[]*net.SRV{
{Target: "a.example.com", Port: 2480},
{Target: "b.example.com", Port: 2480},
{Target: "c.example.com", Port: 2480},
},
[]*net.SRV{},
[]string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"},
},
}
for i, tt := range tests {
lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) {
if service == "etcd-client-ssl" {
return "", tt.withSSL, nil
}
if service == "etcd-client" {
return "", tt.withoutSSL, nil
}
return "", nil, errors.New("Unknown service in mock")
}
srvs, err := GetClient("etcd-client", "example.com")
if err != nil {
t.Fatalf("%d: err: %#v", i, err)
}
if !reflect.DeepEqual(srvs.Endpoints, tt.expected) {
t.Errorf("#%d: endpoints = %v, want %v", i, srvs.Endpoints, tt.expected)
}
}
}

289 vendor/github.com/coreos/etcd/test generated vendored

@ -10,51 +10,67 @@
# PKG=snap ./test
#
# Run code coverage
# COVERDIR=coverage PASSES=cov ./test
# COVERDIR must either be a absolute path or a relative path to the etcd root
# COVERDIR=coverage PASSES="build_cov cov" ./test
set -e
source ./build
# build before setting up test GOPATH
if [[ "${PASSES}" == *"functional"* ]]; then
./tools/functional-tester/build
fi
# build tests with vendored dependencies
etcd_setup_gopath
if [ -z "$PASSES" ]; then
PASSES="fmt dep compile build unit"
PASSES="fmt bom dep compile build unit"
fi
USERPKG=${PKG:-}
# Invoke ./cover for HTML output
COVER=${COVER:-"-cover"}
# Hack: gofmt ./ will recursively check the .git directory. So use *.go for gofmt.
IGNORE_PKGS="(cmd|vendor|etcdserverpb|rafttest|gopath.proto)"
IGNORE_PKGS="(cmd/|etcdserverpb|rafttest|gopath.proto|v3lockpb|v3electionpb)"
INTEGRATION_PKGS="(integration|e2e|contrib|functional-tester)"
# all github.com/coreos/etcd/whatever pkgs that are not auto-generated / tools
PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo`
# pkg1,pkg2,pkg3
PKGS_COMMA=${PKGS// /,}
TEST_PKGS=`find . -name \*_test.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
FORMATTABLE=`find . -name \*.go | while read a; do echo $(dirname $a)/"*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
FORMATTABLE=`find . -name \*.go | while read a; do echo "$(dirname $a)/*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"`
TESTABLE_AND_FORMATTABLE=`echo "$TEST_PKGS" | egrep -v "$INTEGRATION_PKGS"`
if [ -z "$GOARCH" ]; then
GOARCH=$(go env GOARCH);
fi
# user has not provided PKG override
if [ -z "$PKG" ]; then
# check if user provided PKG override
if [ -z "${USERPKG}" ]; then
TEST=$TESTABLE_AND_FORMATTABLE
FMT=$FORMATTABLE
# user has provided PKG override
else
# strip out leading dotslashes and trailing slashes from PKG=./foo/
TEST=${PKG/#./}
TEST=${USERPKG/#./}
TEST=${TEST/#\//}
TEST=${TEST/%\//}
# only run gofmt on packages provided by user
FMT="$TEST"
fi
# split TEST into an array and prepend REPO_PATH to each local package
split=(${TEST// / })
TEST=${split[@]/#/${REPO_PATH}/}
TEST=${split/#/${REPO_PATH}/}
# TODO: 'client' pkg fails with gosimple from generated files
# TODO: 'rafttest' is failing with unused
STATIC_ANALYSIS_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'`
if [ -z "$GOARCH" ]; then
GOARCH=$(go env GOARCH);
fi
# determine whether target supports race detection
if [ "$GOARCH" == "amd64" ]; then
@ -76,6 +92,43 @@ function integration_pass {
go test -timeout 1m -v ${RACE} -cpu 1,2,4 -run=Example $@ ${TEST}
}
function functional_pass {
for a in 1 2 3; do
mkdir -p ./agent-$a
./bin/etcd-agent -etcd-path ./bin/etcd -etcd-log-dir "./agent-$a" -port ":${a}9027" -use-root=false &
pid="$!"
agent_pids="${agent_pids} $pid"
done
for a in 1 2 3; do
echo "Waiting for 'etcd-agent' on ${a}9027..."
while ! nc -z localhost ${a}9027; do
sleep 1
done
done
echo "Starting 'etcd-tester'"
./bin/etcd-tester \
-agent-endpoints "127.0.0.1:19027,127.0.0.1:29027,127.0.0.1:39027" \
-client-ports 12379,22379,32379 \
-peer-ports 12380,22380,32380 \
-limit 1 \
-schedule-cases "0 1 2 3 4 5" \
-exit-on-failure && echo "'etcd-tester' succeeded"
ETCD_TESTER_EXIT_CODE=$?
echo "ETCD_TESTER_EXIT_CODE:" ${ETCD_TESTER_EXIT_CODE}
echo "Waiting for processes to exit"
kill -s TERM ${agent_pids}
for a in ${agent_pids}; do wait $a || true; done
rm -rf ./agent-*
if [[ "${ETCD_TESTER_EXIT_CODE}" -ne "0" ]]; then
echo "FAIL with exit code" ${ETCD_TESTER_EXIT_CODE}
exit ${ETCD_TESTER_EXIT_CODE}
fi
}
function cov_pass {
echo "Running code coverage..."
# install gocovmerge before running code coverage from github.com/wadey/gocovmerge
@ -90,33 +143,56 @@ function cov_pass {
exit 255
fi
if [ ! -f "bin/etcd_test" ]; then
echo "etcd_test binary not found"
exit 255
fi
mkdir -p "$COVERDIR"
# PKGS_DELIM contains all the core etcd pkgs delimited by ',' which will be profiled for code coverage.
# Integration tests will generate code coverage for those pkgs
PKGS_DELIM=$(echo $TEST | sed 's/ /,/g')
# TODO create coverage to e2e test
PKGS=`echo "$TEST_PKGS" | egrep -v "(e2e|functional-tester)"`
for t in ${PKGS}; do
# run code coverage for unit and integration tests
GOCOVFLAGS="-covermode=set -coverpkg $PKGS_COMMA -v -timeout 15m"
failed=""
for t in `echo "${TEST_PKGS}" | egrep -v "(e2e|functional-tester)"`; do
tf=`echo $t | tr / _`
# cache package compilation data for faster repeated builds
go test $GOCOVFLAGS -i ${REPO_PATH}/$t || true
# uses -run=Test to skip examples because clientv3/ example tests will leak goroutines
go test -covermode=set -coverpkg $PKGS_DELIM -timeout 15m -run=Test -v -coverprofile "$COVERDIR/${tf}.coverprofile" ${REPO_PATH}/$t
go test $GOCOVFLAGS -run=Test -coverprofile "$COVERDIR/${tf}.coverprofile" ${REPO_PATH}/$t || failed="$failed $t"
done
# proxy tests
go test -tags cluster_proxy $GOCOVFLAGS -coverprofile "$COVERDIR/proxy_integration.coverprofile" ${REPO_PATH}/integration || failed="$failed proxy-integration"
go test -tags cluster_proxy $GOCOVFLAGS -coverprofile "$COVERDIR/proxy_clientv3.coverprofile" ${REPO_PATH}/clientv3/integration || failed="$failed proxy-clientv3/integration"
# run code coverage for e2e tests
# use 30m timeout because e2e coverage takes longer
# due to many tests cause etcd process to wait
# on leadership transfer timeout during gracefully shutdown
go test -tags cov -timeout 30m -v ${REPO_PATH}"/e2e" || failed="$failed e2e"
gocovmerge "$COVERDIR"/*.coverprofile >"$COVERDIR"/cover.out
# strip out generated files (using GNU-style sed)
sed --in-place '/generated.go/d' "$COVERDIR"/cover.out || true
# held failures to generate the full coverage file, now fail
if [ -n "$failed" ]; then
for f in $failed; do
echo FAIL $f
done
exit 255
fi
}
function e2e_pass {
echo "Running e2e tests..."
go test -timeout 10m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e
go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e
}
function integration_e2e_pass {
echo "Running integration and e2e tests..."
go test -timeout 10m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e &
go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/e2e &
e2epid="$!"
go test -timeout 15m -v -cpu 1,2,4 $@ ${REPO_PATH}/integration &
intpid="$!"
@ -130,16 +206,21 @@ function integration_e2e_pass {
function grpcproxy_pass {
go test -timeout 15m -v ${RACE} -tags cluster_proxy -cpu 1,2,4 $@ ${REPO_PATH}/integration
go test -timeout 15m -v ${RACE} -tags cluster_proxy -cpu 1,2,4 $@ ${REPO_PATH}/clientv3/integration
}
function release_pass {
rm -f ./bin/etcd-last-release
# to grab latest patch release; bump this up for every minor release
UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.0.*" | head -1)
UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.2.*" | head -1)
if [ -n "$MANUAL_VER" ]; then
# in case, we need to test against different version
UPGRADE_VER=$MANUAL_VER
fi
if [[ -z ${UPGRADE_VER} ]]; then
UPGRADE_VER="v3.2.0"
echo "fallback to" ${UPGRADE_VER}
fi
local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz"
echo "Downloading $file"
@ -150,9 +231,8 @@ function release_pass {
set -e
case $result in
0) ;;
22) return 0
;;
*) exit $result
*) echo "FAIL with" ${result}
exit $result
;;
esac
@ -178,29 +258,128 @@ function fmt_pass {
exit 255
fi
echo "Checking 'go tool vet -shadow'..."
for path in $FMT; do
if [ "${path##*.}" != "go" ]; then
path="${path}/*.go"
fi
vetRes=$(go tool vet -shadow ${path})
echo "Checking 'go tool vet -all -shadow'..."
fmtpkgs=$(echo $FMT | xargs dirname | sort | uniq | sed '/\./d')
vetRes=$(go tool vet -all -shadow ${fmtpkgs} 2>&1 | grep -v '/gw/' || true)
if [ -n "${vetRes}" ]; then
echo -e "govet -shadow checking ${path} failed:\n${vetRes}"
echo -e "govet -all -shadow checking failed:\n${vetRes}"
exit 255
fi
done
if which shellcheck >/dev/null; then
echo "Checking shellcheck..."
shellcheckResult=$(shellcheck -fgcc build test scripts/* 2>&1 || true)
if [ -n "${shellcheckResult}" ]; then
# mask the most common ones; fix later
SHELLCHECK_MASK="SC(2086|2006|2068|2196|2035|2162|2076)"
errs=$(echo "${shellcheckResult}" | egrep -v "${SHELLCHECK_MASK}" || true)
if [ -n "${errs}" ]; then
echo -e "shellcheck checking failed:\n${shellcheckResult}\n===\nFailed:\n${errs}"
exit 255
fi
suppressed=$(echo "${shellcheckResult}" | cut -f4- -d':' | sort | uniq -c | sort -n)
echo -e "shellcheck suppressed warnings:\n${suppressed}"
fi
fi
echo "Checking documentation style..."
# eschew you
yous=`find . -name \*.md -exec egrep --color "[Yy]ou[r]?[ '.,;]" {} + | grep -v /v2/ || true`
if [ ! -z "$yous" ]; then
echo -e "found 'you' in documentation:\n${yous}"
exit 255
fi
# TODO: check other markdown files when marker handles headers with '[]'
if which marker >/dev/null; then
echo "Checking marker to find broken links..."
markerResult=`marker --skip-http --root ./Documentation 2>&1 || true`
if [ -n "${markerResult}" ]; then
echo -e "marker checking failed:\n${markerResult}"
exit 255
fi
else
echo "Skipping marker..."
fi
if which goword >/dev/null; then
echo "Checking goword..."
# get all go files to process
gofiles=`find $FMT -iname '*.go' 2>/dev/null`
# ignore tests and protobuf files
gofiles=`echo ${gofiles} | sort | uniq | sed "s/ /\n/g" | egrep -v "(\\_test.go|\\.pb\\.go)"`
# only check for broken exported godocs
gowordRes=`goword -use-spell=false ${gofiles} | grep godoc-export | sort`
if [ ! -z "$gowordRes" ]; then
echo -e "goword checking failed:\n${gowordRes}"
exit 255
fi
else
echo "Skipping goword..."
fi
if which gosimple >/dev/null; then
echo "Checking gosimple..."
gosimpleResult=`gosimple ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
if [ -n "${gosimpleResult}" ]; then
# TODO: resolve these after go1.8 migration
SIMPLE_CHECK_MASK="S(1024)"
if echo "${gosimpleResult}" | egrep -v "$SIMPLE_CHECK_MASK"; then
echo -e "gosimple checking failed:\n${gosimpleResult}"
exit 255
else
echo -e "gosimple warning:\n${gosimpleResult}"
fi
fi
else
echo "Skipping gosimple..."
fi
if which unused >/dev/null; then
echo "Checking unused..."
unusedResult=`unused ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
if [ -n "${unusedResult}" ]; then
echo -e "unused checking failed:\n${unusedResult}"
exit 255
fi
else
echo "Skipping unused..."
fi
if which staticcheck >/dev/null; then
echo "Checking staticcheck..."
staticcheckResult=`staticcheck ${STATIC_ANALYSIS_PATHS} 2>&1 || true`
if [ -n "${staticcheckResult}" ]; then
# TODO: resolve these after go1.8 migration
# See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck
STATIC_CHECK_MASK="SA(1019|2002)"
if echo "${staticcheckResult}" | egrep -v "$STATIC_CHECK_MASK"; then
echo -e "staticcheck checking failed:\n${staticcheckResult}"
exit 255
else
suppressed=`echo "${staticcheckResult}" | sed 's/ /\n/g' | grep "(SA" | sort | uniq -c`
echo -e "staticcheck suppressed warnings:\n${suppressed}"
fi
fi
else
echo "Skipping staticcheck..."
fi
echo "Checking for license header..."
licRes=$(for file in $(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*'); do
head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e " ${file}"
done;)
licRes=""
files=$(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*')
for file in $files; do
if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then
licRes="${licRes}"$(echo -e " ${file}")
fi
done
if [ -n "${licRes}" ]; then
echo -e "license header checking failed:\n${licRes}"
exit 255
fi
echo "Checking commit titles..."
git log --oneline `git merge-base HEAD master`...HEAD | while read l; do
git log --oneline "$(git merge-base HEAD master)"...HEAD | while read l; do
commitMsg=`echo "$l" | cut -f2- -d' '`
if [[ "$commitMsg" == Merge* ]]; then
# ignore "Merge pull" commits
@ -223,12 +402,27 @@ function fmt_pass {
done
}
function bom_pass {
if ! which license-bill-of-materials >/dev/null; then
return
fi
echo "Checking bill of materials..."
license-bill-of-materials \
--override-file bill-of-materials.override.json \
github.com/coreos/etcd github.com/coreos/etcd/etcdctl >bom-now.json || true
if ! diff bill-of-materials.json bom-now.json; then
echo "vendored licenses do not match given bill of materials"
exit 255
fi
rm bom-now.json
}
function dep_pass {
echo "Checking package dependencies..."
# don't pull in etcdserver package
pushd clientv3 >/dev/null
badpkg="(etcdserver|mvcc)"
deps=`go list -f '{{ .Deps }}' | sed 's/ /\n/g' | egrep "${badpkg}" | egrep -v "${badpkg}/" || echo ""`
badpkg="(etcdserver$|mvcc$|backend$|grpc-gateway)"
deps=`go list -f '{{ .Deps }}' | sed 's/ /\n/g' | egrep "${badpkg}" || echo ""`
popd >/dev/null
if [ ! -z "$deps" ]; then
echo -e "clientv3 has masked dependencies:\n${deps}"
@ -236,6 +430,13 @@ function dep_pass {
fi
}
function build_cov_pass {
out="bin"
if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcd_test
go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcdctl_test ${REPO_PATH}/etcdctl
}
function compile_pass {
echo "Checking build..."
go build -v ./tools/...

56 vendor/github.com/coreos/etcd/version/version.go generated vendored Normal file

@ -0,0 +1,56 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version implements etcd version parsing and contains latest version
// information.
package version
import (
"fmt"
"strings"
"github.com/coreos/go-semver/semver"
)
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
Version = "3.2.6"
APIVersion = "unknown"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
func init() {
ver, err := semver.NewVersion(Version)
if err == nil {
APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
}
}
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
// TODO: raft state machine version
}
// Cluster only keeps the major.minor.
func Cluster(v string) string {
vs := strings.Split(v, ".")
if len(vs) <= 2 {
return v
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
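
A minimal sketch (not part of this commit) showing the newly vendored version package in use: Cluster trims to major.minor, and Versions marshals to the JSON payload that the client-side GetVersion above decodes.

```go
// Sketch only: depends solely on the version package shown above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	// "3.2.6" -> "3.2"
	fmt.Println(version.Cluster(version.Version))

	// Prints {"etcdserver":"3.2.6","etcdcluster":"3.2"}, the same shape
	// served by the /version endpoint.
	b, err := json.Marshal(version.Versions{
		Server:  version.Version,
		Cluster: version.Cluster(version.Version),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```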


@ -91,7 +91,12 @@ func expires(date, expires string) (time.Duration, bool, error) {
return 0, false, nil
}
te, err := time.Parse(time.RFC1123, expires)
var te time.Time
var err error
if expires == "0" {
return 0, false, nil
}
te, err = time.Parse(time.RFC1123, expires)
if err != nil {
return 0, false, err
}
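
A standalone sketch (not part of the vendored package) illustrating why the new guard is needed: "Expires: 0" conventionally means "already expired", but the bare value is not valid RFC 1123 and time.Parse rejects it.

```go
// Sketch only: demonstrates the parse failure the guard above avoids.
package main

import (
	"fmt"
	"time"
)

func main() {
	if _, err := time.Parse(time.RFC1123, "0"); err != nil {
		fmt.Println("parse error without the guard:", err)
	}
	// With the added check, an Expires value of "0" is treated as
	// "no caching information" (ok == false) instead of an error.
}
```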


@ -177,6 +177,13 @@ func TestExpiresPass(t *testing.T) {
wantTTL: 0,
wantOK: false,
},
// Expires set to false
{
date: "Thu, 01 Dec 1983 22:00:00 GMT",
exp: "0",
wantTTL: 0,
wantOK: false,
},
// Expires < Date
{
date: "Fri, 02 Dec 1983 01:00:00 GMT",


@ -2,7 +2,6 @@ package oidc
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
@ -182,14 +181,16 @@ func (r *remoteKeySet) updateKeys() ([]jose.JSONWebKey, time.Time, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, time.Time{}, fmt.Errorf("oidc: read response body: %v", err)
return nil, time.Time{}, fmt.Errorf("unable to read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return nil, time.Time{}, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
}
var keySet jose.JSONWebKeySet
if err := json.Unmarshal(body, &keySet); err != nil {
err = unmarshalResp(resp, body, &keySet)
if err != nil {
return nil, time.Time{}, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
}


@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"mime"
"net/http"
"strings"
"time"
@ -93,18 +94,23 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
return nil, fmt.Errorf("unable to read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
defer resp.Body.Close()
var p providerJSON
if err := json.Unmarshal(body, &p); err != nil {
err = unmarshalResp(resp, body, &p)
if err != nil {
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
}
if p.Issuer != issuer {
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
}
@ -307,3 +313,16 @@ func (j *jsonTime) UnmarshalJSON(b []byte) error {
*j = jsonTime(time.Unix(unix, 0))
return nil
}
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
err := json.Unmarshal(body, &v)
if err == nil {
return nil
}
ct := r.Header.Get("Content-Type")
mediaType, _, parseErr := mime.ParseMediaType(ct)
if parseErr == nil && mediaType == "application/json" {
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
}
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
}
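
A standalone sketch (not part of the vendored package) of the error-reporting pattern unmarshalResp introduces: when JSON decoding fails, the Content-Type header decides which message is returned, so an HTML error page served where JSON was expected is easy to spot. The fake response below is contrived for illustration.

```go
// Sketch only: mirrors the unmarshalResp logic above on a fake response.
package main

import (
	"encoding/json"
	"fmt"
	"mime"
	"net/http"
	"net/http/httptest"
)

func decode(r *http.Response, body []byte, v interface{}) error {
	err := json.Unmarshal(body, v)
	if err == nil {
		return nil
	}
	ct := r.Header.Get("Content-Type")
	if mt, _, perr := mime.ParseMediaType(ct); perr == nil && mt == "application/json" {
		return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
	}
	return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
}

func main() {
	// Simulate a misconfigured provider answering with an HTML error page.
	body := []byte("<html>502 Bad Gateway</html>")
	rec := httptest.NewRecorder()
	rec.Header().Set("Content-Type", "text/html")
	rec.Write(body)

	var out struct{ Issuer string }
	fmt.Println(decode(rec.Result(), body, &out))
}
```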


@ -475,6 +475,7 @@ func (g *fakeProviderConfigGetterSetter) Set(cfg ProviderConfig) error {
type fakeProviderConfigHandler struct {
cfg ProviderConfig
maxAge time.Duration
noExpires bool
}
func (s *fakeProviderConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
@ -482,6 +483,9 @@ func (s *fakeProviderConfigHandler) ServeHTTP(w http.ResponseWriter, r *http.Req
if s.maxAge.Seconds() >= 0 {
w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", int(s.maxAge.Seconds())))
}
if s.noExpires {
w.Header().Set("Expires", "0")
}
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
@ -555,6 +559,7 @@ func TestHTTPProviderConfigGetter(t *testing.T) {
dsc string
age time.Duration
cfg ProviderConfig
noExpires bool
ok bool
}{
// everything is good
@ -596,6 +601,17 @@ func TestHTTPProviderConfigGetter(t *testing.T) {
},
ok: true,
},
// An expires header set to 0
{
dsc: "https://example.com",
age: time.Minute,
cfg: ProviderConfig{
Issuer: &url.URL{Scheme: "https", Host: "example.com"},
ExpiresAt: now.Add(time.Minute),
},
ok: true,
noExpires: true,
},
}
for i, tt := range tests {

8 vendor/github.com/coreos/go-semver/.travis.yml generated vendored Normal file

@ -0,0 +1,8 @@
language: go
sudo: false
go:
- 1.4
- 1.5
- 1.6
- tip
script: cd semver && go test

202 vendor/github.com/coreos/go-semver/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

28
vendor/github.com/coreos/go-semver/README.md generated vendored Normal file
View file

@ -0,0 +1,28 @@
# go-semver - Semantic Versioning Library
[![Build Status](https://travis-ci.org/coreos/go-semver.svg?branch=master)](https://travis-ci.org/coreos/go-semver)
[![GoDoc](https://godoc.org/github.com/coreos/go-semver/semver?status.svg)](https://godoc.org/github.com/coreos/go-semver/semver)
go-semver is a [semantic versioning][semver] library for Go. It lets you parse
and compare two semantic version strings.
[semver]: http://semver.org/
## Usage
```go
vA := semver.New("1.2.3")
vB := semver.New("3.2.1")
fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
```
## Example Application
```
$ go run example.go 1.2.3 3.2.1
1.2.3 < 3.2.1 == true
$ go run example.go 5.2.3 3.2.1
5.2.3 < 3.2.1 == false
```

20
vendor/github.com/coreos/go-semver/example.go generated vendored Normal file
View file

@ -0,0 +1,20 @@
package main
import (
"fmt"
"github.com/coreos/go-semver/semver"
"os"
)
func main() {
vA, err := semver.NewVersion(os.Args[1])
if err != nil {
fmt.Println(err.Error())
}
vB, err := semver.NewVersion(os.Args[2])
if err != nil {
fmt.Println(err.Error())
}
fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
}

268
vendor/github.com/coreos/go-semver/semver/semver.go generated vendored Normal file
View file

@ -0,0 +1,268 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org
package semver
import (
"bytes"
"errors"
"fmt"
"strconv"
"strings"
)
type Version struct {
Major int64
Minor int64
Patch int64
PreRelease PreRelease
Metadata string
}
type PreRelease string
func splitOff(input *string, delim string) (val string) {
parts := strings.SplitN(*input, delim, 2)
if len(parts) == 2 {
*input = parts[0]
val = parts[1]
}
return val
}
func New(version string) *Version {
return Must(NewVersion(version))
}
func NewVersion(version string) (*Version, error) {
v := Version{}
if err := v.Set(version); err != nil {
return nil, err
}
return &v, nil
}
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
if err != nil {
panic(err)
}
return v
}
// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
metadata := splitOff(&version, "+")
preRelease := PreRelease(splitOff(&version, "-"))
dotParts := strings.SplitN(version, ".", 3)
if len(dotParts) != 3 {
return fmt.Errorf("%s is not in dotted-tri format", version)
}
parsed := make([]int64, 3, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
parsed[i] = val
if err != nil {
return err
}
}
v.Metadata = metadata
v.PreRelease = preRelease
v.Major = parsed[0]
v.Minor = parsed[1]
v.Patch = parsed[2]
return nil
}
func (v Version) String() string {
var buffer bytes.Buffer
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
if v.PreRelease != "" {
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
}
if v.Metadata != "" {
fmt.Fprintf(&buffer, "+%s", v.Metadata)
}
return buffer.String()
}
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var data string
if err := unmarshal(&data); err != nil {
return err
}
return v.Set(data)
}
func (v Version) MarshalJSON() ([]byte, error) {
return []byte(`"` + v.String() + `"`), nil
}
func (v *Version) UnmarshalJSON(data []byte) error {
l := len(data)
if l == 0 || string(data) == `""` {
return nil
}
if l < 2 || data[0] != '"' || data[l-1] != '"' {
return errors.New("invalid semver string")
}
return v.Set(string(data[1 : l-1]))
}
// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
return cmp
}
return preReleaseCompare(v, versionB)
}
// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
return v.Compare(versionB) == 0
}
// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
return v.Compare(versionB) < 0
}
// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
return []int64{v.Major, v.Minor, v.Patch}
}
func (p PreRelease) Slice() []string {
preRelease := string(p)
return strings.Split(preRelease, ".")
}
func preReleaseCompare(versionA Version, versionB Version) int {
a := versionA.PreRelease
b := versionB.PreRelease
/* Handle the case where if two versions are otherwise equal it is the
* one without a PreRelease that is greater */
if len(a) == 0 && (len(b) > 0) {
return 1
} else if len(b) == 0 && (len(a) > 0) {
return -1
}
// If there is a prerelease, check and compare each part.
return recursivePreReleaseCompare(a.Slice(), b.Slice())
}
func recursiveCompare(versionA []int64, versionB []int64) int {
if len(versionA) == 0 {
return 0
}
a := versionA[0]
b := versionB[0]
if a > b {
return 1
} else if a < b {
return -1
}
return recursiveCompare(versionA[1:], versionB[1:])
}
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
// A larger set of pre-release fields has a higher precedence than a smaller set,
// if all of the preceding identifiers are equal.
if len(versionA) == 0 {
if len(versionB) > 0 {
return -1
}
return 0
} else if len(versionB) == 0 {
// We're longer than versionB so return 1.
return 1
}
a := versionA[0]
b := versionB[0]
aInt := false
bInt := false
aI, err := strconv.Atoi(versionA[0])
if err == nil {
aInt = true
}
bI, err := strconv.Atoi(versionB[0])
if err == nil {
bInt = true
}
// Handle Integer Comparison
if aInt && bInt {
if aI > bI {
return 1
} else if aI < bI {
return -1
}
}
// Handle String Comparison
if a > b {
return 1
} else if a < b {
return -1
}
return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
v.Major += 1
v.Minor = 0
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpMinor increments the Minor field by 1 and resets the Patch, PreRelease and Metadata fields to their default values
func (v *Version) BumpMinor() {
v.Minor += 1
v.Patch = 0
v.PreRelease = PreRelease("")
v.Metadata = ""
}
// BumpPatch increments the Patch field by 1 and resets the PreRelease and Metadata fields to their default values
func (v *Version) BumpPatch() {
v.Patch += 1
v.PreRelease = PreRelease("")
v.Metadata = ""
}
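The comparison rules implemented above (releases outranking their own pre-releases, numeric pre-release identifiers compared numerically, bumps clearing the lower-order fields) can be exercised with a short program. This is an illustrative sketch, not part of the vendored tree; it assumes only the import path shown in this commit.

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// A release outranks its own release candidate.
	rc := semver.New("1.0.0-rc.1")
	ga := semver.New("1.0.0")
	fmt.Println(rc.LessThan(*ga)) // true

	// Numeric pre-release identifiers compare numerically, not lexically.
	a := semver.New("1.2.3-a.10")
	b := semver.New("1.2.3-a.5")
	fmt.Println(a.Compare(*b)) // 1

	// BumpMinor resets Patch, PreRelease and Metadata.
	v := semver.New("1.4.7-beta.2+build.9")
	v.BumpMinor()
	fmt.Println(v) // 1.5.0
}
```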

View file

@ -0,0 +1,370 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"math/rand"
"reflect"
"testing"
"time"
"gopkg.in/yaml.v2"
)
type fixture struct {
GreaterVersion string
LesserVersion string
}
var fixtures = []fixture{
fixture{"0.0.0", "0.0.0-foo"},
fixture{"0.0.1", "0.0.0"},
fixture{"1.0.0", "0.9.9"},
fixture{"0.10.0", "0.9.0"},
fixture{"0.99.0", "0.10.0"},
fixture{"2.0.0", "1.2.3"},
fixture{"0.0.0", "0.0.0-foo"},
fixture{"0.0.1", "0.0.0"},
fixture{"1.0.0", "0.9.9"},
fixture{"0.10.0", "0.9.0"},
fixture{"0.99.0", "0.10.0"},
fixture{"2.0.0", "1.2.3"},
fixture{"0.0.0", "0.0.0-foo"},
fixture{"0.0.1", "0.0.0"},
fixture{"1.0.0", "0.9.9"},
fixture{"0.10.0", "0.9.0"},
fixture{"0.99.0", "0.10.0"},
fixture{"2.0.0", "1.2.3"},
fixture{"1.2.3", "1.2.3-asdf"},
fixture{"1.2.3", "1.2.3-4"},
fixture{"1.2.3", "1.2.3-4-foo"},
fixture{"1.2.3-5-foo", "1.2.3-5"},
fixture{"1.2.3-5", "1.2.3-4"},
fixture{"1.2.3-5-foo", "1.2.3-5-Foo"},
fixture{"3.0.0", "2.7.2+asdf"},
fixture{"3.0.0+foobar", "2.7.2"},
fixture{"1.2.3-a.10", "1.2.3-a.5"},
fixture{"1.2.3-a.b", "1.2.3-a.5"},
fixture{"1.2.3-a.b", "1.2.3-a"},
fixture{"1.2.3-a.b.c.10.d.5", "1.2.3-a.b.c.5.d.100"},
fixture{"1.0.0", "1.0.0-rc.1"},
fixture{"1.0.0-rc.2", "1.0.0-rc.1"},
fixture{"1.0.0-rc.1", "1.0.0-beta.11"},
fixture{"1.0.0-beta.11", "1.0.0-beta.2"},
fixture{"1.0.0-beta.2", "1.0.0-beta"},
fixture{"1.0.0-beta", "1.0.0-alpha.beta"},
fixture{"1.0.0-alpha.beta", "1.0.0-alpha.1"},
fixture{"1.0.0-alpha.1", "1.0.0-alpha"},
}
func TestCompare(t *testing.T) {
for _, v := range fixtures {
gt, err := NewVersion(v.GreaterVersion)
if err != nil {
t.Error(err)
}
lt, err := NewVersion(v.LesserVersion)
if err != nil {
t.Error(err)
}
if gt.LessThan(*lt) {
t.Errorf("%s should not be less than %s", gt, lt)
}
if gt.Equal(*lt) {
t.Errorf("%s should not be equal to %s", gt, lt)
}
if gt.Compare(*lt) <= 0 {
t.Errorf("%s should be greater than %s", gt, lt)
}
if !lt.LessThan(*gt) {
t.Errorf("%s should be less than %s", lt, gt)
}
if !lt.Equal(*lt) {
t.Errorf("%s should be equal to %s", lt, lt)
}
if lt.Compare(*gt) > 0 {
t.Errorf("%s should not be greater than %s", lt, gt)
}
}
}
func testString(t *testing.T, orig string, version *Version) {
if orig != version.String() {
t.Errorf("%s != %s", orig, version)
}
}
func TestString(t *testing.T) {
for _, v := range fixtures {
gt, err := NewVersion(v.GreaterVersion)
if err != nil {
t.Error(err)
}
testString(t, v.GreaterVersion, gt)
lt, err := NewVersion(v.LesserVersion)
if err != nil {
t.Error(err)
}
testString(t, v.LesserVersion, lt)
}
}
func shuffleStringSlice(src []string) []string {
dest := make([]string, len(src))
rand.Seed(time.Now().Unix())
perm := rand.Perm(len(src))
for i, v := range perm {
dest[v] = src[i]
}
return dest
}
func TestSort(t *testing.T) {
sortedVersions := []string{"1.0.0", "1.0.2", "1.2.0", "3.1.1"}
unsortedVersions := shuffleStringSlice(sortedVersions)
semvers := []*Version{}
for _, v := range unsortedVersions {
sv, err := NewVersion(v)
if err != nil {
t.Fatal(err)
}
semvers = append(semvers, sv)
}
Sort(semvers)
for idx, sv := range semvers {
if sv.String() != sortedVersions[idx] {
t.Fatalf("incorrect sort at index %v", idx)
}
}
}
func TestBumpMajor(t *testing.T) {
version, _ := NewVersion("1.0.0")
version.BumpMajor()
if version.Major != 2 {
t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
}
version, _ = NewVersion("1.5.2")
version.BumpMajor()
if version.Minor != 0 || version.Patch != 0 {
t.Fatalf("bumping major on 1.5.2 resulted in %v", version)
}
version, _ = NewVersion("1.0.0+build.1-alpha.1")
version.BumpMajor()
if version.PreRelease != "" || version.Metadata != "" {
t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version)
}
}
func TestBumpMinor(t *testing.T) {
version, _ := NewVersion("1.0.0")
version.BumpMinor()
if version.Major != 1 {
t.Fatalf("bumping minor on 1.0.0 resulted in %v", version)
}
if version.Minor != 1 {
t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
}
version, _ = NewVersion("1.0.0+build.1-alpha.1")
version.BumpMinor()
if version.PreRelease != "" || version.Metadata != "" {
t.Fatalf("bumping minor on 1.0.0+build.1-alpha.1 resulted in %v", version)
}
}
func TestBumpPatch(t *testing.T) {
version, _ := NewVersion("1.0.0")
version.BumpPatch()
if version.Major != 1 {
t.Fatalf("bumping minor on 1.0.0 resulted in %v", version)
}
if version.Minor != 0 {
t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
}
if version.Patch != 1 {
t.Fatalf("bumping major on 1.0.0 resulted in %v", version)
}
version, _ = NewVersion("1.0.0+build.1-alpha.1")
version.BumpPatch()
if version.PreRelease != "" || version.Metadata != "" {
t.Fatalf("bumping patch on 1.0.0+build.1-alpha.1 resulted in %v", version)
}
}
func TestMust(t *testing.T) {
tests := []struct {
versionStr string
version *Version
recov interface{}
}{
{
versionStr: "1.0.0",
version: &Version{Major: 1},
},
{
versionStr: "version number",
recov: errors.New("version number is not in dotted-tri format"),
},
}
for _, tt := range tests {
func() {
defer func() {
recov := recover()
if !reflect.DeepEqual(tt.recov, recov) {
t.Fatalf("incorrect panic for %q: want %v, got %v", tt.versionStr, tt.recov, recov)
}
}()
version := Must(NewVersion(tt.versionStr))
if !reflect.DeepEqual(tt.version, version) {
t.Fatalf("incorrect version for %q: want %+v, got %+v", tt.versionStr, tt.version, version)
}
}()
}
}
type fixtureJSON struct {
GreaterVersion *Version
LesserVersion *Version
}
func TestJSON(t *testing.T) {
fj := make([]fixtureJSON, len(fixtures))
for i, v := range fixtures {
var err error
fj[i].GreaterVersion, err = NewVersion(v.GreaterVersion)
if err != nil {
t.Fatal(err)
}
fj[i].LesserVersion, err = NewVersion(v.LesserVersion)
if err != nil {
t.Fatal(err)
}
}
fromStrings, err := json.Marshal(fixtures)
if err != nil {
t.Fatal(err)
}
fromVersions, err := json.Marshal(fj)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(fromStrings, fromVersions) {
t.Errorf("Expected: %s", fromStrings)
t.Errorf("Unexpected: %s", fromVersions)
}
fromJson := make([]fixtureJSON, 0, len(fj))
err = json.Unmarshal(fromStrings, &fromJson)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(fromJson, fj) {
t.Error("Expected: ", fj)
t.Error("Unexpected: ", fromJson)
}
}
func TestYAML(t *testing.T) {
document, err := yaml.Marshal(fixtures)
if err != nil {
t.Fatal(err)
}
expected := make([]fixtureJSON, len(fixtures))
for i, v := range fixtures {
var err error
expected[i].GreaterVersion, err = NewVersion(v.GreaterVersion)
if err != nil {
t.Fatal(err)
}
expected[i].LesserVersion, err = NewVersion(v.LesserVersion)
if err != nil {
t.Fatal(err)
}
}
fromYAML := make([]fixtureJSON, 0, len(fixtures))
err = yaml.Unmarshal(document, &fromYAML)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(fromYAML, expected) {
t.Error("Expected: ", expected)
t.Error("Unexpected: ", fromYAML)
}
}
func TestBadInput(t *testing.T) {
bad := []string{
"1.2",
"1.2.3x",
"0x1.3.4",
"-1.2.3",
"1.2.3.4",
}
for _, b := range bad {
if _, err := NewVersion(b); err == nil {
t.Error("Improperly accepted value: ", b)
}
}
}
func TestFlag(t *testing.T) {
v := Version{}
f := flag.NewFlagSet("version", flag.ContinueOnError)
f.Var(&v, "version", "set version")
if err := f.Set("version", "1.2.3"); err != nil {
t.Fatal(err)
}
if v.String() != "1.2.3" {
t.Errorf("Set wrong value %q", v)
}
}
func ExampleVersion_LessThan() {
vA := New("1.2.3")
vB := New("3.2.1")
fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
// Output:
// 1.2.3 < 3.2.1 == true
}

38
vendor/github.com/coreos/go-semver/semver/sort.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package semver
import (
"sort"
)
type Versions []*Version
func (s Versions) Len() int {
return len(s)
}
func (s Versions) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s Versions) Less(i, j int) bool {
return s[i].LessThan(*s[j])
}
// Sort sorts the given slice of Version
func Sort(versions []*Version) {
sort.Sort(Versions(versions))
}
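Sort builds directly on the Compare/LessThan logic from semver.go. A minimal sketch of the intended call pattern (illustrative only; it assumes nothing beyond the vendored package itself):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("3.1.1"),
		semver.New("1.0.2"),
		semver.New("1.0.0"),
		semver.New("1.2.0"),
	}
	semver.Sort(versions) // ascending semantic-version order
	for _, v := range versions {
		fmt.Println(v) // 1.0.0, 1.0.2, 1.2.0, 3.1.1
	}
}
```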

1
vendor/github.com/dnstap/golang-dnstap/.gitignore generated vendored Normal file
View file

@ -0,0 +1 @@
*.swp

View file

@ -1,10 +0,0 @@
.deps/
.dirstamp
.libs/
*.pb-c.c
*.pb-c.h
*.pb.cc
*.pb.h
*.pb.go
*_pb2.py
*_pb2.pyc

View file

@ -1,121 +0,0 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

View file

@ -1,5 +0,0 @@
dnstap: flexible, structured event replication format for DNS software
----------------------------------------------------------------------
This directory contains only the protobuf schemas for dnstap, and is the root of
a repository named "dnstap.pb".

View file

@ -1,268 +0,0 @@
// dnstap: flexible, structured event replication format for DNS software
//
// This file contains the protobuf schemas for the "dnstap" structured event
// replication format for DNS software.
// Written in 2013-2014 by Farsight Security, Inc.
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this file to the public
// domain worldwide. This file is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this file. If not, see:
//
// <http://creativecommons.org/publicdomain/zero/1.0/>.
package dnstap;
// "Dnstap": this is the top-level dnstap type, which is a "union" type that
// contains other kinds of dnstap payloads, although currently only one type
// of dnstap payload is defined.
// See: https://developers.google.com/protocol-buffers/docs/techniques#union
message Dnstap {
// DNS server identity.
// If enabled, this is the identity string of the DNS server which generated
// this message. Typically this would be the same string as returned by an
// "NSID" (RFC 5001) query.
optional bytes identity = 1;
// DNS server version.
// If enabled, this is the version string of the DNS server which generated
// this message. Typically this would be the same string as returned by a
// "version.bind" query.
optional bytes version = 2;
// Extra data for this payload.
// This field can be used for adding an arbitrary byte-string annotation to
// the payload. No encoding or interpretation is applied or enforced.
optional bytes extra = 3;
// Identifies which field below is filled in.
enum Type {
MESSAGE = 1;
}
required Type type = 15;
// One of the following will be filled in.
optional Message message = 14;
}
// SocketFamily: the network protocol family of a socket. This specifies how
// to interpret "network address" fields.
enum SocketFamily {
INET = 1; // IPv4 (RFC 791)
INET6 = 2; // IPv6 (RFC 2460)
}
// SocketProtocol: the transport protocol of a socket. This specifies how to
// interpret "transport port" fields.
enum SocketProtocol {
UDP = 1; // User Datagram Protocol (RFC 768)
TCP = 2; // Transmission Control Protocol (RFC 793)
}
// Message: a wire-format (RFC 1035 section 4) DNS message and associated
// metadata. Applications generating "Message" payloads should follow
// certain requirements based on the MessageType, see below.
message Message {
// There are eight types of "Message" defined that correspond to the
// four arrows in the following diagram, slightly modified from RFC 1035
// section 2:
// +---------+               +----------+           +--------+
// |         |     query     |          |   query   |        |
// | Stub    |-SQ--------CQ->| Recursive|-RQ----AQ->| Auth.  |
// | Resolver|               |  Server  |           | Name   |
// |         |<-SR--------CR-|          |<-RR----AR-| Server |
// +---------+    response   |          |  response |        |
//                           +----------+           +--------+
// Each arrow has two Type values each, one for each "end" of each arrow,
// because these are considered to be distinct events. Each end of each
// arrow on the diagram above has been marked with a two-letter Type
// mnemonic. Clockwise from upper left, these mnemonic values are:
//
// SQ: STUB_QUERY
// CQ: CLIENT_QUERY
// RQ: RESOLVER_QUERY
// AQ: AUTH_QUERY
// AR: AUTH_RESPONSE
// RR: RESOLVER_RESPONSE
// CR: CLIENT_RESPONSE
// SR: STUB_RESPONSE
// Two additional types of "Message" have been defined for the
// "forwarding" case where an upstream DNS server is responsible for
// further recursion. These are not shown on the diagram above, but have
// the following mnemonic values:
// FQ: FORWARDER_QUERY
// FR: FORWARDER_RESPONSE
// The "Message" Type values are defined below.
enum Type {
// AUTH_QUERY is a DNS query message received from a resolver by an
// authoritative name server, from the perspective of the authoritative
// name server.
AUTH_QUERY = 1;
// AUTH_RESPONSE is a DNS response message sent from an authoritative
// name server to a resolver, from the perspective of the authoritative
// name server.
AUTH_RESPONSE = 2;
// RESOLVER_QUERY is a DNS query message sent from a resolver to an
// authoritative name server, from the perspective of the resolver.
// Resolvers typically clear the RD (recursion desired) bit when
// sending queries.
RESOLVER_QUERY = 3;
// RESOLVER_RESPONSE is a DNS response message received from an
// authoritative name server by a resolver, from the perspective of
// the resolver.
RESOLVER_RESPONSE = 4;
// CLIENT_QUERY is a DNS query message sent from a client to a DNS
// server which is expected to perform further recursion, from the
// perspective of the DNS server. The client may be a stub resolver or
// forwarder or some other type of software which typically sets the RD
// (recursion desired) bit when querying the DNS server. The DNS server
// may be a simple forwarding proxy or it may be a full recursive
// resolver.
CLIENT_QUERY = 5;
// CLIENT_RESPONSE is a DNS response message sent from a DNS server to
// a client, from the perspective of the DNS server. The DNS server
// typically sets the RA (recursion available) bit when responding.
CLIENT_RESPONSE = 6;
// FORWARDER_QUERY is a DNS query message sent from a downstream DNS
// server to an upstream DNS server which is expected to perform
// further recursion, from the perspective of the downstream DNS
// server.
FORWARDER_QUERY = 7;
// FORWARDER_RESPONSE is a DNS response message sent from an upstream
// DNS server performing recursion to a downstream DNS server, from the
// perspective of the downstream DNS server.
FORWARDER_RESPONSE = 8;
// STUB_QUERY is a DNS query message sent from a stub resolver to a DNS
// server, from the perspective of the stub resolver.
STUB_QUERY = 9;
// STUB_RESPONSE is a DNS response message sent from a DNS server to a
// stub resolver, from the perspective of the stub resolver.
STUB_RESPONSE = 10;
// TOOL_QUERY is a DNS query message sent from a DNS software tool to a
// DNS server, from the perspective of the tool.
TOOL_QUERY = 11;
// TOOL_RESPONSE is a DNS response message received by a DNS software
// tool from a DNS server, from the perspective of the tool.
TOOL_RESPONSE = 12;
}
// One of the Type values described above.
required Type type = 1;
// One of the SocketFamily values described above.
optional SocketFamily socket_family = 2;
// One of the SocketProtocol values described above.
optional SocketProtocol socket_protocol = 3;
// The network address of the message initiator.
// For SocketFamily INET, this field is 4 octets (IPv4 address).
// For SocketFamily INET6, this field is 16 octets (IPv6 address).
optional bytes query_address = 4;
// The network address of the message responder.
// For SocketFamily INET, this field is 4 octets (IPv4 address).
// For SocketFamily INET6, this field is 16 octets (IPv6 address).
optional bytes response_address = 5;
// The transport port of the message initiator.
// This is a 16-bit UDP or TCP port number, depending on SocketProtocol.
optional uint32 query_port = 6;
// The transport port of the message responder.
// This is a 16-bit UDP or TCP port number, depending on SocketProtocol.
optional uint32 response_port = 7;
// The time at which the DNS query message was sent or received, depending
// on whether this is an AUTH_QUERY, RESOLVER_QUERY, or CLIENT_QUERY.
// This is the number of seconds since the UNIX epoch.
optional uint64 query_time_sec = 8;
// The time at which the DNS query message was sent or received.
// This is the seconds fraction, expressed as a count of nanoseconds.
optional fixed32 query_time_nsec = 9;
// The initiator's original wire-format DNS query message, verbatim.
optional bytes query_message = 10;
// The "zone" or "bailiwick" pertaining to the DNS query message.
// This is a wire-format DNS domain name.
optional bytes query_zone = 11;
// The time at which the DNS response message was sent or received,
// depending on whether this is an AUTH_RESPONSE, RESOLVER_RESPONSE, or
// CLIENT_RESPONSE.
// This is the number of seconds since the UNIX epoch.
optional uint64 response_time_sec = 12;
// The time at which the DNS response message was sent or received.
// This is the seconds fraction, expressed as a count of nanoseconds.
optional fixed32 response_time_nsec = 13;
// The responder's original wire-format DNS response message, verbatim.
optional bytes response_message = 14;
}
// All fields except for 'type' in the Message schema are optional.
// It is recommended that at least the following fields be filled in for
// particular types of Messages.
// AUTH_QUERY:
// socket_family, socket_protocol
// query_address, query_port
// query_message
// query_time_sec, query_time_nsec
// AUTH_RESPONSE:
// socket_family, socket_protocol
// query_address, query_port
// query_time_sec, query_time_nsec
// response_message
// response_time_sec, response_time_nsec
// RESOLVER_QUERY:
// socket_family, socket_protocol
// query_message
// query_time_sec, query_time_nsec
// query_zone
// response_address, response_port
// RESOLVER_RESPONSE:
// socket_family, socket_protocol
// query_time_sec, query_time_nsec
// query_zone
// response_address, response_port
// response_message
// response_time_sec, response_time_nsec
// CLIENT_QUERY:
// socket_family, socket_protocol
// query_message
// query_time_sec, query_time_nsec
// CLIENT_RESPONSE:
// socket_family, socket_protocol
// query_time_sec, query_time_nsec
// response_message
// response_time_sec, response_time_nsec
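For orientation, the schema above maps to ordinary proto2-generated Go types in the vendored golang-dnstap package. The sketch below shows how a CLIENT_QUERY payload carrying the recommended fields might be assembled; it assumes the standard protoc-gen-go naming for this schema (Dnstap_MESSAGE, Message_CLIENT_QUERY, proto.Uint32, and so on) rather than quoting the generated file, so treat the exact identifiers as an approximation.

```go
package main

import (
	"log"
	"net"
	"time"

	dnstap "github.com/dnstap/golang-dnstap"
	"github.com/golang/protobuf/proto"
)

func main() {
	now := time.Now()
	msg := &dnstap.Message{
		Type:           dnstap.Message_CLIENT_QUERY.Enum(),
		SocketFamily:   dnstap.SocketFamily_INET.Enum(),
		SocketProtocol: dnstap.SocketProtocol_UDP.Enum(),
		QueryAddress:   net.ParseIP("192.0.2.1").To4(), // 4 octets for INET
		QueryPort:      proto.Uint32(53053),
		QueryTimeSec:   proto.Uint64(uint64(now.Unix())),
		QueryTimeNsec:  proto.Uint32(uint32(now.Nanosecond())),
		// QueryMessage would carry the verbatim wire-format DNS query.
	}
	payload := &dnstap.Dnstap{
		Type:    dnstap.Dnstap_MESSAGE.Enum(),
		Message: msg,
	}
	buf, err := proto.Marshal(payload)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("encoded %d-byte dnstap payload", len(buf))
}
```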

View file

@ -1,164 +0,0 @@
/*
* Copyright (c) 2013-2014 by Farsight Security, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/dnstap/golang-dnstap"
)
var (
flagReadFile = flag.String("r", "", "read dnstap payloads from file")
flagReadSock = flag.String("u", "", "read dnstap payloads from unix socket")
flagWriteFile = flag.String("w", "-", "write output to file")
flagQuietText = flag.Bool("q", false, "use quiet text output")
flagYamlText = flag.Bool("y", false, "use verbose YAML output")
)
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s [OPTION]...\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
Quiet text output format mnemonics:
AQ: AUTH_QUERY
AR: AUTH_RESPONSE
RQ: RESOLVER_QUERY
RR: RESOLVER_RESPONSE
CQ: CLIENT_QUERY
CR: CLIENT_RESPONSE
FQ: FORWARDER_QUERY
FR: FORWARDER_RESPONSE
SQ: STUB_QUERY
SR: STUB_RESPONSE
TQ: TOOL_QUERY
TR: TOOL_RESPONSE
`)
}
func outputOpener(fname string, text, yaml bool) func() dnstap.Output {
return func() dnstap.Output {
var o dnstap.Output
var err error
if text {
o, err = dnstap.NewTextOutputFromFilename(fname, dnstap.TextFormat)
} else if yaml {
o, err = dnstap.NewTextOutputFromFilename(fname, dnstap.YamlFormat)
} else {
o, err = dnstap.NewFrameStreamOutputFromFilename(fname)
}
if err != nil {
fmt.Fprintf(os.Stderr, "dnstap: Failed to open output file: %s\n", err)
os.Exit(1)
}
go o.RunOutputLoop()
return o
}
}
func outputLoop(opener func() dnstap.Output, data <-chan []byte, done chan<- struct{}) {
sigch := make(chan os.Signal, 1)
signal.Notify(sigch, os.Interrupt, syscall.SIGHUP)
o := opener()
defer func() {
o.Close()
close(done)
os.Exit(0)
}()
for {
select {
case b, ok := <-data:
if !ok {
return
}
o.GetOutputChannel() <- b
case sig := <-sigch:
if sig == syscall.SIGHUP {
o.Close()
o = opener()
continue
}
return
}
}
}
func main() {
var err error
var i dnstap.Input
runtime.GOMAXPROCS(runtime.NumCPU())
log.SetFlags(0)
flag.Usage = usage
// Handle command-line arguments.
flag.Parse()
if *flagReadFile == "" && *flagReadSock == "" {
fmt.Fprintf(os.Stderr, "dnstap: Error: no inputs specified.\n")
os.Exit(1)
}
if *flagWriteFile == "-" {
if *flagQuietText == false && *flagYamlText == false {
*flagQuietText = true
}
}
if *flagReadFile != "" && *flagReadSock != "" {
fmt.Fprintf(os.Stderr, "dnstap: Error: specify exactly one of -r or -u.\n")
os.Exit(1)
}
// Start the output loop.
output := make(chan []byte, 1)
opener := outputOpener(*flagWriteFile, *flagQuietText, *flagYamlText)
outDone := make(chan struct{})
go outputLoop(opener, output, outDone)
// Open the input and start the input loop.
if *flagReadFile != "" {
i, err = dnstap.NewFrameStreamInputFromFilename(*flagReadFile)
if err != nil {
fmt.Fprintf(os.Stderr, "dnstap: Failed to open input file: %s\n", err)
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "dnstap: opened input file %s\n", *flagReadFile)
} else if *flagReadSock != "" {
i, err = dnstap.NewFrameStreamSockInputFromPath(*flagReadSock)
if err != nil {
fmt.Fprintf(os.Stderr, "dnstap: Failed to open input socket: %s\n", err)
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "dnstap: opened input socket %s\n", *flagReadSock)
}
i.ReadInto(output)
// Wait for input loop to finish.
i.Wait()
close(output)
<-outDone
}

View file

@ -16,3 +16,4 @@ davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
Misty Stanley-Jones <misty@docker.com> Misty Stanley-Jones <misty@apache.org>

View file

@ -1,4 +1,3 @@
a-palchikov <deemok@gmail.com>
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
@ -18,10 +17,11 @@ Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anis Elleuch <vadmeste@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Anton Tiurin <noxiouz@yandex.ru>
Anusha Ragunathan <anusha@docker.com>
a-palchikov <deemok@gmail.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
@ -46,9 +46,9 @@ Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
davidli <wenquan.li@hp.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
@ -68,8 +68,8 @@ gabriell nascimento <gabriell@bluesoft.com.br>
Gleb Schukin <gschukin@ptsecurity.com>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
Hu Keping <hukeping@huawei.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
@ -86,21 +86,21 @@ Jihoon Chung <jihoon@gmail.com>
Joao Fernandes <joao.fernandes@docker.com>
John Mulhausen <john@docker.com>
John Starks <jostarks@microsoft.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Chorlton <josh.chorlton@docker.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
Ke Xu <leonhartx.k@gmail.com>
liuchang0812 <liuchang0812@gmail.com>
Liu Hua <sdu.liu@huawei.com>
Li Yi <denverdino@gmail.com>
Lloyd Ramey <lnr0626@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
@ -108,15 +108,14 @@ Marcus Martins <marcus@docker.com>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matthew Green <greenmr@live.co.uk>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Matthew Green <greenmr@live.co.uk>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Michal Minář <miminar@redhat.com>
Mike Brown <brownwm@us.ibm.com>
Miquel Sabaté <msabate@suse.com>
Misty Stanley-Jones <misty@apache.org>
Misty Stanley-Jones <misty@docker.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
@ -165,17 +164,18 @@ Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Victor Vieux <vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com>
Victor Vieux <vieux@docker.com>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
W. Trevor King <wking@tremily.us>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yaoyao.xyy <yaoyao.xyy@alibaba-inc.com>
yixi zhang <yixi@memsql.com>
yuexiao-wang <wang.yuexiao@zte.com.cn>
yuzou <zouyu7@huawei.com>
zhouhaibing089 <zhouhaibing089@gmail.com>

View file

@ -0,0 +1 @@
.*swp

View file

@ -1,63 +0,0 @@
/*
* Copyright (c) 2014 by Farsight Security, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"log"
"os"
"github.com/farsightsec/golang-framestream"
)
func main() {
// Arguments.
if len(os.Args) != 2 {
fmt.Fprintf(os.Stderr, "Usage: %s <INPUT FILE>\n", os.Args[0])
fmt.Fprintf(os.Stderr, "Dumps a FrameStreams formatted input file.\n\n")
os.Exit(1)
}
fname := os.Args[1]
// Open the input file.
file, err := os.Open(fname)
if err != nil {
log.Fatal(err)
}
// Create the decoder.
fs, err := framestream.NewDecoder(file, nil)
if err != nil {
log.Fatal(err)
}
// Print the frames.
fmt.Printf("Control frame [START] (%v bytes): %x\n", len(fs.ControlStart), fs.ControlStart)
for {
frame, err := fs.Decode()
if err == framestream.EOF {
break
}
if err != nil {
log.Fatal(err)
}
fmt.Printf("Data frame (%v bytes): %x\n", len(frame), frame)
}
if fs.ControlStop != nil {
fmt.Printf("Control frame [STOP] (%v bytes): %x\n", len(fs.ControlStop), fs.ControlStop)
}
}

View file

@ -38,6 +38,7 @@ var (
type ExpandOptions struct {
RelativeBase string
SkipSchemas bool
ContinueOnError bool
}
// ResolutionCache a cache for resolving urls
@ -510,7 +511,8 @@ func (r *schemaLoader) reset() {
// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
resolver, err := defaultSchemaLoader(spec, nil, options, nil)
if err != nil {
// Just in case this ever returns an error.
if shouldStopOnError(err, resolver.options) {
return err
}
@ -518,7 +520,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
for key, definition := range spec.Definitions {
var def *Schema
var err error
if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); err != nil {
if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
@ -527,14 +529,14 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
}
for key, parameter := range spec.Parameters {
if err := expandParameter(&parameter, resolver); err != nil {
if err := expandParameter(&parameter, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Parameters[key] = parameter
}
for key, response := range spec.Responses {
if err := expandResponse(&response, resolver); err != nil {
if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Responses[key] = response
@ -542,7 +544,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
if spec.Paths != nil {
for key, path := range spec.Paths.Paths {
if err := expandPathItem(&path, resolver); err != nil {
if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Paths.Paths[key] = path
@ -552,6 +554,18 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
return nil
}
func shouldStopOnError(err error, opts *ExpandOptions) bool {
if err != nil && !opts.ContinueOnError {
return true
}
if err != nil {
log.Println(err)
}
return false
}
// ExpandSchema expands the refs in the schema object
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
return ExpandSchemaWithBasePath(schema, root, cache, nil)
@ -639,7 +653,7 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
return &target, nil
}
if err := resolver.Resolve(&target.Ref, &t); err != nil {
if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) {
return &target, err
}
@ -648,87 +662,109 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
return &target, nil
}
parentRefs = append(parentRefs, target.Ref.String())
if t != nil {
target = *t
}
}
t, err := expandItems(target, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target = *t
}
for i := range target.AllOf {
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.AllOf[i] = *t
}
}
for i := range target.AnyOf {
t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
target.AnyOf[i] = *t
}
for i := range target.OneOf {
t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.OneOf[i] = *t
}
}
if target.Not != nil {
t, err := expandSchema(*target.Not, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.Not = *t
}
}
for k := range target.Properties {
t, err := expandSchema(target.Properties[k], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.Properties[k] = *t
}
}
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.AdditionalProperties.Schema = *t
}
}
for k := range target.PatternProperties {
t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.PatternProperties[k] = *t
}
}
for k := range target.Dependencies {
if target.Dependencies[k].Schema != nil {
t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.Dependencies[k].Schema = *t
}
}
}
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.AdditionalItems.Schema = *t
}
}
for k := range target.Definitions {
t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.Definitions[k] = *t
}
}
return &target, nil
}
@ -736,6 +772,7 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
if pathItem == nil {
return nil
}
if pathItem.Ref.String() != "" {
if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
return err
@ -745,29 +782,29 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
}
for idx := range pathItem.Parameters {
if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil {
if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) {
return err
}
}
if err := expandOperation(pathItem.Get, resolver); err != nil {
if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Head, resolver); err != nil {
if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Options, resolver); err != nil {
if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Put, resolver); err != nil {
if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Post, resolver); err != nil {
if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Patch, resolver); err != nil {
if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Delete, resolver); err != nil {
if err := expandOperation(pathItem.Delete, resolver); shouldStopOnError(err, resolver.options) {
return err
}
return nil
@ -777,8 +814,9 @@ func expandOperation(op *Operation, resolver *schemaLoader) error {
if op == nil {
return nil
}
for i, param := range op.Parameters {
if err := expandParameter(&param, resolver); err != nil {
if err := expandParameter(&param, resolver); shouldStopOnError(err, resolver.options) {
return err
}
op.Parameters[i] = param
@ -786,11 +824,11 @@ func expandOperation(op *Operation, resolver *schemaLoader) error {
if op.Responses != nil {
responses := op.Responses
if err := expandResponse(responses.Default, resolver); err != nil {
if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) {
return err
}
for code, response := range responses.StatusCodeResponses {
if err := expandResponse(&response, resolver); err != nil {
if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
return err
}
responses.StatusCodeResponses[code] = response
@ -805,9 +843,10 @@ func expandResponse(response *Response, resolver *schemaLoader) error {
}
var parentRefs []string
if response.Ref.String() != "" {
parentRefs = append(parentRefs, response.Ref.String())
if err := resolver.Resolve(&response.Ref, response); err != nil {
if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
@ -817,11 +856,11 @@ func expandResponse(response *Response, resolver *schemaLoader) error {
if !resolver.options.SkipSchemas && response.Schema != nil {
parentRefs = append(parentRefs, response.Schema.Ref.String())
debugLog("response ref: %s", response.Schema.Ref)
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) {
return err
}
s, err := expandSchema(*response.Schema, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
@ -836,9 +875,10 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
}
var parentRefs []string
if parameter.Ref.String() != "" {
parentRefs = append(parentRefs, parameter.Ref.String())
if err := resolver.Resolve(&parameter.Ref, parameter); err != nil {
if err := resolver.Resolve(&parameter.Ref, parameter); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
@ -846,11 +886,11 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
}
if !resolver.options.SkipSchemas && parameter.Schema != nil {
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); err != nil {
if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); shouldStopOnError(err, resolver.options) {
return err
}
s, err := expandSchema(*parameter.Schema, parentRefs, resolver)
if err != nil {
if shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
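The net effect of the ContinueOnError flag added above is that ExpandSpec switches from fail-fast to log-and-continue for unresolved $refs. A hedged sketch of the opt-in call pattern (the code that loads the document is assumed and not shown in this diff):

```go
package main

import (
	"log"

	"github.com/go-openapi/spec"
)

// expandLeniently expands a swagger document, logging unresolvable $refs
// via the new ContinueOnError option instead of aborting on the first one.
func expandLeniently(doc *spec.Swagger) error {
	opts := &spec.ExpandOptions{
		ContinueOnError: true, // field introduced in this change
	}
	return spec.ExpandSpec(doc, opts)
}

func main() {
	// In real use doc would be unmarshalled from a swagger file; an empty
	// document keeps this sketch self-contained.
	doc := &spec.Swagger{}
	if err := expandLeniently(doc); err != nil {
		log.Fatal(err)
	}
}
```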

View file

@ -197,6 +197,35 @@ func TestCircularRefsExpansion(t *testing.T) {
}, "Calling expand schema with circular refs, should not panic!")
}
func TestContinueOnErrorExpansion(t *testing.T) {
missingRefDoc, err := jsonDoc("fixtures/expansion/missingRef.json")
assert.NoError(t, err)
testCase := struct {
Input *Swagger `json:"input"`
Expected *Swagger `json:"expected"`
}{}
err = json.Unmarshal(missingRefDoc, &testCase)
assert.NoError(t, err)
opts := &ExpandOptions{
ContinueOnError: true,
}
err = ExpandSpec(testCase.Input, opts)
assert.NoError(t, err)
assert.Equal(t, testCase.Input, testCase.Expected, "Should continue expanding spec when a definition can't be found.")
doc, err := jsonDoc("fixtures/expansion/missingItemRef.json")
spec := new(Swagger)
err = json.Unmarshal(doc, spec)
assert.NoError(t, err)
assert.NotPanics(t, func() {
err = ExpandSpec(spec, opts)
assert.NoError(t, err)
}, "Array of missing refs should not cause a panic, and continue to expand spec.")
}
func TestIssue415(t *testing.T) {
doc, err := jsonDoc("fixtures/expansion/clickmeter.json")
assert.NoError(t, err)

View file

@ -120,25 +120,18 @@ func NewRef(refURI string) (Ref, error) {
return Ref{Ref: ref}, nil
}
// MustCreateRef creates a ref object but
// MustCreateRef creates a ref object but panics when refURI is invalid.
// Use the NewRef method for a version that returns an error.
func MustCreateRef(refURI string) Ref {
return Ref{Ref: jsonreference.MustCreateRef(refURI)}
}
// // NewResolvedRef creates a resolved ref
// func NewResolvedRef(refURI string, data interface{}) Ref {
// return Ref{
// Ref: jsonreference.MustCreateRef(refURI),
// Resolved: data,
// }
// }
// MarshalJSON marshals this ref into a JSON object
func (r Ref) MarshalJSON() ([]byte, error) {
str := r.String()
if str == "" {
if r.IsRoot() {
return []byte(`{"$ref":"#"}`), nil
return []byte(`{"$ref":""}`), nil
}
return []byte("{}"), nil
}

View file

@ -201,8 +201,8 @@ func (r *SchemaURL) UnmarshalJSON(data []byte) error {
type SchemaProps struct {
ID string `json:"id,omitempty"`
Ref Ref `json:"-,omitempty"`
Schema SchemaURL `json:"-,omitempty"`
Ref Ref `json:"-"`
Schema SchemaURL `json:"-"`
Description string `json:"description,omitempty"`
Type StringOrArray `json:"type,omitempty"`
Format string `json:"format,omitempty"`

View file

@ -237,6 +237,8 @@ func newNameIndex(tpe reflect.Type) nameIndex {
// GetJSONNames gets all the json property names for a type
func (n *NameProvider) GetJSONNames(subject interface{}) []string {
n.lock.Lock()
defer n.lock.Unlock()
tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
names, ok := n.index[tpe]
if !ok {
@ -258,6 +260,8 @@ func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bo
// GetJSONNameForType gets the json name for a go property name on a given type
func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
n.lock.Lock()
defer n.lock.Unlock()
names, ok := n.index[tpe]
if !ok {
names = n.makeNameIndex(tpe)
@ -267,8 +271,6 @@ func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string
}
func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
n.lock.Lock()
defer n.lock.Unlock()
names := newNameIndex(tpe)
n.index[tpe] = names
return names
@ -282,6 +284,8 @@ func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool
// GetGoNameForType gets the go name for a given type for a json property name
func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
n.lock.Lock()
defer n.lock.Unlock()
names, ok := n.index[tpe]
if !ok {
names = n.makeNameIndex(tpe)

17
vendor/github.com/golang/protobuf/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,17 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
install:
- go get -v -d -t github.com/golang/protobuf/...
- curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip
- unzip /tmp/protoc.zip -d $HOME/protoc
env:
- PATH=$HOME/protoc/bin:$PATH
script:
- make all test

View file

@ -1,5 +1,7 @@
# Go support for Protocol Buffers
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
Google's data interchange format.
Copyright 2010 The Go Authors.
https://github.com/golang/protobuf

View file

@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
func sizeZigzag64(x uint64) int {
return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
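The zigzag transform above maps signed values to unsigned ones so that small magnitudes stay small on the wire: 0 becomes 0, -1 becomes 1, 1 becomes 2, -2 becomes 3, and so on. A tiny standalone check of that mapping, written as plain Go independent of the proto Buffer type:

package main

import "fmt"

// zigzag64 mirrors the (x << 1) ^ uint64(int64(x) >> 63) expression used by EncodeZigzag64.
func zigzag64(x int64) uint64 {
	return (uint64(x) << 1) ^ uint64(x>>63)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%d -> %d\n", v, zigzag64(v))
	}
	// prints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4 (one per line)
}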

View file

@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
fv.SetUint(x)
return nil
}
case reflect.Uint64:

View file

@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/"
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)

View file

@ -1,11 +1,11 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
// source: google/protobuf/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
github.com/golang/protobuf/ptypes/any/any.proto
google/protobuf/any.proto
It has these top-level messages:
Any
@ -149,20 +149,20 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 184 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
0x38, 0xe5, 0x73, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x45, 0x4c, 0xcc, 0xee, 0x01, 0x4e, 0xab,
0x98, 0xe4, 0xdc, 0x21, 0x46, 0x05, 0x40, 0x95, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5,
0x97, 0xe7, 0x85, 0x80, 0x94, 0x26, 0xb1, 0x81, 0xf5, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0x45, 0x1f, 0x1a, 0xf2, 0xf3, 0x00, 0x00, 0x00,
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}

View file

@ -52,37 +52,37 @@ var durationTests = []struct {
dur time.Duration
}{
// The zero duration.
{&durpb.Duration{0, 0}, true, true, 0},
{&durpb.Duration{Seconds: 0, Nanos: 0}, true, true, 0},
// Some ordinary non-zero durations.
{&durpb.Duration{100, 0}, true, true, 100 * time.Second},
{&durpb.Duration{-100, 0}, true, true, -100 * time.Second},
{&durpb.Duration{100, 987}, true, true, 100*time.Second + 987},
{&durpb.Duration{-100, -987}, true, true, -(100*time.Second + 987)},
{&durpb.Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second},
{&durpb.Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second},
{&durpb.Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987},
{&durpb.Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)},
// The largest duration representable in Go.
{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64},
{&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64},
// The smallest duration representable in Go.
{&durpb.Duration{minGoSeconds, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64},
{&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64},
{nil, false, false, 0},
{&durpb.Duration{-100, 987}, false, false, 0},
{&durpb.Duration{100, -987}, false, false, 0},
{&durpb.Duration{math.MinInt64, 0}, false, false, 0},
{&durpb.Duration{math.MaxInt64, 0}, false, false, 0},
{&durpb.Duration{Seconds: -100, Nanos: 987}, false, false, 0},
{&durpb.Duration{Seconds: 100, Nanos: -987}, false, false, 0},
{&durpb.Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0},
{&durpb.Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0},
// The largest valid duration.
{&durpb.Duration{maxSeconds, 1e9 - 1}, true, false, 0},
{&durpb.Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0},
// The smallest valid duration.
{&durpb.Duration{minSeconds, -(1e9 - 1)}, true, false, 0},
{&durpb.Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0},
// The smallest invalid duration above the valid range.
{&durpb.Duration{maxSeconds + 1, 0}, false, false, 0},
{&durpb.Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0},
// The largest invalid duration below the valid range.
{&durpb.Duration{minSeconds - 1, -(1e9 - 1)}, false, false, 0},
{&durpb.Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0},
// One nanosecond past the largest duration representable in Go.
{&durpb.Duration{maxGoSeconds, int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0},
{&durpb.Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0},
// One nanosecond past the smallest duration representable in Go.
{&durpb.Duration{minGoSeconds, int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0},
{&durpb.Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0},
// One second past the largest duration representable in Go.
{&durpb.Duration{maxGoSeconds + 1, int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0},
{&durpb.Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0},
// One second past the smallest duration representable in Go.
{&durpb.Duration{minGoSeconds - 1, int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0},
{&durpb.Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0},
}
func TestValidateDuration(t *testing.T) {

View file

@ -8,14 +8,7 @@
PKG=github.com/golang/protobuf/ptypes
UPSTREAM=https://github.com/google/protobuf
UPSTREAM_SUBDIR=src/google/protobuf
PROTO_FILES='
any.proto
duration.proto
empty.proto
struct.proto
timestamp.proto
wrappers.proto
'
PROTO_FILES=(any duration empty struct timestamp wrappers)
function die() {
echo 1>&2 $*
@ -36,31 +29,15 @@ pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base
cd "$base"
echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir
# Pass 1: build mapping from upstream filename to our filename.
declare -A filename_map
for f in $(cd $PKG && find * -name '*.proto'); do
echo -n 1>&2 "looking for latest version of $f... "
up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
echo 1>&2 $up
if [ $(echo $up | wc -w) != "1" ]; then
die "not exactly one match"
fi
filename_map[$up]=$f
done
# Pass 2: copy files
for up in "${!filename_map[@]}"; do
f=${filename_map[$up]}
shortname=$(basename $f | sed 's,\.proto$,,')
cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
for file in ${PROTO_FILES[@]}; do
echo 1>&2 "* $file"
protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
done
# Run protoc once per package.
for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
echo 1>&2 "* $dir"
protoc --go_out=. $dir/*.proto
done
echo 1>&2 "All OK"

View file

@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
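A small usage sketch of the new helper, pairing TimestampNow with the existing Timestamp converter to round-trip back into a time.Time; the import path is assumed to be the vendored github.com/golang/protobuf/ptypes:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	ts := ptypes.TimestampNow()    // proto Timestamp for the current instant
	t, err := ptypes.Timestamp(ts) // convert back to time.Time, validating the proto
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC())
}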

View file

@ -46,32 +46,32 @@ var tests = []struct {
t time.Time
}{
// The timestamp representing the Unix epoch date.
{&tspb.Timestamp{0, 0}, true, utcDate(1970, 1, 1)},
{&tspb.Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)},
// The smallest representable timestamp.
{&tspb.Timestamp{math.MinInt64, math.MinInt32}, false,
{&tspb.Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false,
time.Unix(math.MinInt64, math.MinInt32).UTC()},
// The smallest representable timestamp with non-negative nanos.
{&tspb.Timestamp{math.MinInt64, 0}, false, time.Unix(math.MinInt64, 0).UTC()},
{&tspb.Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()},
// The earliest valid timestamp.
{&tspb.Timestamp{minValidSeconds, 0}, true, utcDate(1, 1, 1)},
{&tspb.Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)},
//"0001-01-01T00:00:00Z"},
// The largest representable timestamp.
{&tspb.Timestamp{math.MaxInt64, math.MaxInt32}, false,
{&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false,
time.Unix(math.MaxInt64, math.MaxInt32).UTC()},
// The largest representable timestamp with nanos in range.
{&tspb.Timestamp{math.MaxInt64, 1e9 - 1}, false,
{&tspb.Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false,
time.Unix(math.MaxInt64, 1e9-1).UTC()},
// The largest valid timestamp.
{&tspb.Timestamp{maxValidSeconds - 1, 1e9 - 1}, true,
{&tspb.Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true,
time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)},
// The smallest invalid timestamp that is larger than the valid range.
{&tspb.Timestamp{maxValidSeconds, 0}, false, time.Unix(maxValidSeconds, 0).UTC()},
{&tspb.Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()},
// A date before the epoch.
{&tspb.Timestamp{-281836800, 0}, true, utcDate(1961, 1, 26)},
{&tspb.Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)},
// A date after the epoch.
{&tspb.Timestamp{1296000000, 0}, true, utcDate(2011, 1, 26)},
{&tspb.Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)},
// A date after the epoch, in the middle of the day.
{&tspb.Timestamp{1296012345, 940483}, true,
{&tspb.Timestamp{Seconds: 1296012345, Nanos: 940483}, true,
time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)},
}
@ -123,8 +123,8 @@ func TestTimestampString(t *testing.T) {
}{
// Not much testing needed because presumably time.Format is
// well-tested.
{&tspb.Timestamp{0, 0}, "1970-01-01T00:00:00Z"},
{&tspb.Timestamp{minValidSeconds - 1, 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"},
{&tspb.Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"},
{&tspb.Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: seconds:-62135596801 before 0001-01-01)"},
} {
got := TimestampString(test.ts)
if got != test.want {
@ -136,3 +136,18 @@ func TestTimestampString(t *testing.T) {
func utcDate(year, month, day int) time.Time {
return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
}
func TestTimestampNow(t *testing.T) {
// Bracket the expected time.
before := time.Now()
ts := TimestampNow()
after := time.Now()
tm, err := Timestamp(ts)
if err != nil {
t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err)
}
if tm.Before(before) || tm.After(after) {
t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm)
}
}

View file

@ -34,21 +34,27 @@ type Fuzzer struct {
nilChance float64
minElements int
maxElements int
maxDepth int
}
// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
return NewWithSeed(time.Now().UnixNano())
}
func NewWithSeed(seed int64) *Fuzzer {
f := &Fuzzer{
defaultFuzzFuncs: fuzzFuncMap{
reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
},
fuzzFuncs: fuzzFuncMap{},
r: rand.New(rand.NewSource(time.Now().UnixNano())),
r: rand.New(rand.NewSource(seed)),
nilChance: .2,
minElements: 1,
maxElements: 10,
maxDepth: 100,
}
return f
}
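NewWithSeed makes a fuzzing run reproducible by pinning the random source, while New keeps the old behavior by delegating with the current time. A hedged sketch assuming the usual gofuzz import path; the record struct is illustrative:

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type record struct {
	ID   int
	Tags []string
}

func main() {
	f := fuzz.NewWithSeed(42) // same seed, same sequence of generated values
	var a, b record
	f.Fuzz(&a)
	fuzz.NewWithSeed(42).Fuzz(&b)
	fmt.Println(fmt.Sprint(a) == fmt.Sprint(b)) // true: both runs start from seed 42
}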
@ -136,6 +142,14 @@ func (f *Fuzzer) genShouldFill() bool {
return f.r.Float64() > f.nilChance
}
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
f.maxDepth = d
return f
}
// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
@ -144,17 +158,19 @@ func (f *Fuzzer) genShouldFill() bool {
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// Not safe for cyclic or tree-like structs!
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.doFuzz(v, 0)
f.fuzzWithContext(v, 0)
}
// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
@ -170,7 +186,7 @@ func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
f.doFuzz(v, flagNoCustomFuzz)
f.fuzzWithContext(v, flagNoCustomFuzz)
}
const (
@ -178,69 +194,87 @@ const (
flagNoCustomFuzz uint64 = 1 << iota
)
func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
fc := &fuzzerContext{fuzzer: f}
fc.doFuzz(v, flags)
}
// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
fuzzer *Fuzzer
curDepth int
}
func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
if fc.curDepth >= fc.fuzzer.maxDepth {
return
}
fc.curDepth++
defer func() { fc.curDepth-- }()
if !v.CanSet() {
return
}
if flags&flagNoCustomFuzz == 0 {
// Check for both pointer and non-pointer custom functions.
if v.CanAddr() && f.tryCustom(v.Addr()) {
if v.CanAddr() && fc.tryCustom(v.Addr()) {
return
}
if f.tryCustom(v) {
if fc.tryCustom(v) {
return
}
}
if fn, ok := fillFuncMap[v.Kind()]; ok {
fn(v, f.r)
fn(v, fc.fuzzer.r)
return
}
switch v.Kind() {
case reflect.Map:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
v.Set(reflect.MakeMap(v.Type()))
n := f.genElementCount()
n := fc.fuzzer.genElementCount()
for i := 0; i < n; i++ {
key := reflect.New(v.Type().Key()).Elem()
f.doFuzz(key, 0)
fc.doFuzz(key, 0)
val := reflect.New(v.Type().Elem()).Elem()
f.doFuzz(val, 0)
fc.doFuzz(val, 0)
v.SetMapIndex(key, val)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Ptr:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
v.Set(reflect.New(v.Type().Elem()))
f.doFuzz(v.Elem(), 0)
fc.doFuzz(v.Elem(), 0)
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Slice:
if f.genShouldFill() {
n := f.genElementCount()
if fc.fuzzer.genShouldFill() {
n := fc.fuzzer.genElementCount()
v.Set(reflect.MakeSlice(v.Type(), n, n))
for i := 0; i < n; i++ {
f.doFuzz(v.Index(i), 0)
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
f.doFuzz(v.Index(i), 0)
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
f.doFuzz(v.Field(i), 0)
fc.doFuzz(v.Field(i), 0)
}
case reflect.Chan:
fallthrough
@ -255,20 +289,20 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (f *Fuzzer) tryCustom(v reflect.Value) bool {
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
// First: see if we have a fuzz function for it.
doCustom, ok := f.fuzzFuncs[v.Type()]
doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
if !ok {
// Second: see if it can fuzz itself.
if v.CanInterface() {
intf := v.Interface()
if fuzzable, ok := intf.(Interface); ok {
fuzzable.Fuzz(Continue{f: f, Rand: f.r})
fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
return true
}
}
// Finally: see if there is a default fuzz function.
doCustom, ok = f.defaultFuzzFuncs[v.Type()]
doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
if !ok {
return false
}
@ -294,8 +328,8 @@ func (f *Fuzzer) tryCustom(v reflect.Value) bool {
}
doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
f: f,
Rand: f.r,
fc: fc,
Rand: fc.fuzzer.r,
})})
return true
}
@ -310,7 +344,7 @@ type Interface interface {
// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
f *Fuzzer
fc *fuzzerContext
// For convenience, Continue implements rand.Rand via embedding.
// Use this for generating any randomness if you want your fuzzing
@ -325,7 +359,7 @@ func (c Continue) Fuzz(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
c.f.doFuzz(v, 0)
c.fc.doFuzz(v, 0)
}
// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
@ -338,7 +372,7 @@ func (c Continue) FuzzNoCustom(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
c.f.doFuzz(v, flagNoCustomFuzz)
c.fc.doFuzz(v, flagNoCustomFuzz)
}
// RandString makes a random string up to 20 characters long. The returned string

View file

@ -364,7 +364,7 @@ func TestFuzz_noCustom(t *testing.T) {
inner.Str = testPhrase
},
)
c := Continue{f: f, Rand: f.r}
c := Continue{fc: &fuzzerContext{fuzzer: f}, Rand: f.r}
// Fuzzer.Fuzz()
obj1 := Outer{}
@ -426,3 +426,47 @@ func TestFuzz_NumElements(t *testing.T) {
return 4, len(obj.A) == 1
})
}
func TestFuzz_Maxdepth(t *testing.T) {
type S struct {
S *S
}
f := New().NilChance(0)
f.MaxDepth(1)
for i := 0; i < 100; i++ {
obj := S{}
f.Fuzz(&obj)
if obj.S != nil {
t.Errorf("Expected nil")
}
}
f.MaxDepth(3) // field, ptr
for i := 0; i < 100; i++ {
obj := S{}
f.Fuzz(&obj)
if obj.S == nil {
t.Errorf("Expected obj.S not nil")
} else if obj.S.S != nil {
t.Errorf("Expected obj.S.S nil")
}
}
f.MaxDepth(5) // field, ptr, field, ptr
for i := 0; i < 100; i++ {
obj := S{}
f.Fuzz(&obj)
if obj.S == nil {
t.Errorf("Expected obj.S not nil")
} else if obj.S.S == nil {
t.Errorf("Expected obj.S.S not nil")
} else if obj.S.S.S != nil {
t.Errorf("Expected obj.S.S.S nil")
}
}
}

View file

@ -1 +0,0 @@
*.cover

View file

@ -1,15 +0,0 @@
sudo: false
language: go
go:
- 1.6
- 1.7
before_install:
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
script:
- gofmt -l .
- goimports -l .
- go tool vet .
- go test -coverprofile=coverage.txt -covermode=atomic
after_success:
- bash <(curl -s https://codecov.io/bash)

View file

@ -1,27 +0,0 @@
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement]
(https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement]
(https://cla.developers.google.com/about/google-corporate).

View file

@ -1,27 +0,0 @@
Copyright 2016, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,24 +0,0 @@
Google API Extensions for Go
============================
[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go)
[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go)
Google API Extensions for Go (gax-go) is a set of modules which aids the
development of APIs for clients and servers based on `gRPC` and Google API
conventions.
Application code will rarely need to use this library directly,
but the code generated automatically from API definition files can use it
to simplify code generation and to provide more convenient and idiomatic API surface.
**This project is currently experimental and not supported.**
Go Versions
===========
This library requires Go 1.6 or above.
License
=======
BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE)
for more information.

View file

@ -1,149 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"math/rand"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// CallOption is an option used by Invoke to control behaviors of RPC calls.
// CallOption works by modifying relevant fields of CallSettings.
type CallOption interface {
// Resolve applies the option by modifying cs.
Resolve(cs *CallSettings)
}
// Retryer is used by Invoke to determine retry behavior.
type Retryer interface {
// Retry reports whether a request should be retried and how long to pause before retrying
// if the previous attempt returned with err. Invoke never calls Retry with nil error.
Retry(err error) (pause time.Duration, shouldRetry bool)
}
type retryerOption func() Retryer
func (o retryerOption) Resolve(s *CallSettings) {
s.Retry = o
}
// WithRetry sets CallSettings.Retry to fn.
func WithRetry(fn func() Retryer) CallOption {
return retryerOption(fn)
}
// OnCodes returns a Retryer that retries if and only if
// the previous attempt returns a GRPC error whose error code is stored in cc.
// Pause times between retries are specified by bo.
//
// bo is only used for its parameters; each Retryer has its own copy.
func OnCodes(cc []codes.Code, bo Backoff) Retryer {
return &boRetryer{
backoff: bo,
codes: append([]codes.Code(nil), cc...),
}
}
type boRetryer struct {
backoff Backoff
codes []codes.Code
}
func (r *boRetryer) Retry(err error) (time.Duration, bool) {
c := grpc.Code(err)
for _, rc := range r.codes {
if c == rc {
return r.backoff.Pause(), true
}
}
return 0, false
}
// Backoff implements exponential backoff.
// The wait time between retries is a random value between 0 and the "retry envelope".
// The envelope starts at Initial and increases by the factor of Multiplier every retry,
// but is capped at Max.
type Backoff struct {
// Initial is the initial value of the retry envelope, defaults to 1 second.
Initial time.Duration
// Max is the maximum value of the retry envelope, defaults to 30 seconds.
Max time.Duration
// Multiplier is the factor by which the retry envelope increases.
// It should be greater than 1 and defaults to 2.
Multiplier float64
// cur is the current retry envelope
cur time.Duration
}
func (bo *Backoff) Pause() time.Duration {
if bo.Initial == 0 {
bo.Initial = time.Second
}
if bo.cur == 0 {
bo.cur = bo.Initial
}
if bo.Max == 0 {
bo.Max = 30 * time.Second
}
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
d := time.Duration(rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {
bo.cur = bo.Max
}
return d
}
type grpcOpt []grpc.CallOption
func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
}
type CallSettings struct {
// Retry returns a Retryer to be used to control retry logic of a method call.
// If Retry is nil or the returned Retryer is nil, the call will not be retried.
Retry func() Retryer
// CallOptions to be forwarded to GRPC.
GRPC []grpc.CallOption
}

View file

@ -1,88 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"testing"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var _ Retryer = &boRetryer{}
func TestBackofDefault(t *testing.T) {
backoff := Backoff{}
max := []time.Duration{1, 2, 4, 8, 16, 30, 30, 30, 30, 30}
for i, m := range max {
max[i] = m * time.Second
}
for i, w := range max {
if d := backoff.Pause(); d > w {
t.Errorf("Backoff duration should be at most %s, got %s", w, d)
} else if i < len(max)-1 && backoff.cur != max[i+1] {
t.Errorf("current envelope is %s, want %s", backoff.cur, max[i+1])
}
}
}
func TestBackoffExponential(t *testing.T) {
backoff := Backoff{Initial: 1, Max: 20, Multiplier: 2}
want := []time.Duration{1, 2, 4, 8, 16, 20, 20, 20, 20, 20}
for _, w := range want {
if d := backoff.Pause(); d > w {
t.Errorf("Backoff duration should be at most %s, got %s", w, d)
}
}
}
func TestOnCodes(t *testing.T) {
// Lint errors grpc.Errorf in 1.6. It mistakenly expects the first arg to Errorf to be a string.
errf := grpc.Errorf
apiErr := errf(codes.Unavailable, "")
tests := []struct {
c []codes.Code
retry bool
}{
{nil, false},
{[]codes.Code{codes.DeadlineExceeded}, false},
{[]codes.Code{codes.DeadlineExceeded, codes.Unavailable}, true},
{[]codes.Code{codes.Unavailable}, true},
}
for _, tst := range tests {
b := OnCodes(tst.c, Backoff{})
if _, retry := b.Retry(apiErr); retry != tst.retry {
t.Errorf("retriable codes: %v, error code: %s, retry: %t, want %t", tst.c, grpc.Code(apiErr), retry, tst.retry)
}
}
}

View file

@ -1,40 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package gax contains a set of modules which aid the development of APIs
// for clients and servers based on gRPC and Google API conventions.
//
// Application code will rarely need to use this library directly.
// However, code generated automatically from API definition files can use it
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
//
// This project is currently experimental and not supported.
package gax
const Version = "0.1.0"

View file

@ -1,24 +0,0 @@
package gax
import "bytes"
// XGoogHeader is for use by the Google Cloud Libraries only.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
func XGoogHeader(keyval ...string) string {
if len(keyval) == 0 {
return ""
}
if len(keyval)%2 != 0 {
panic("gax.Header: odd argument count")
}
var buf bytes.Buffer
for i := 0; i < len(keyval); i += 2 {
buf.WriteByte(' ')
buf.WriteString(keyval[i])
buf.WriteByte('/')
buf.WriteString(keyval[i+1])
}
return buf.String()[1:]
}

View file

@ -1,19 +0,0 @@
package gax
import "testing"
func TestXGoogHeader(t *testing.T) {
for _, tst := range []struct {
kv []string
want string
}{
{nil, ""},
{[]string{"abc", "def"}, "abc/def"},
{[]string{"abc", "def", "xyz", "123", "foo", ""}, "abc/def xyz/123 foo/"},
} {
got := XGoogHeader(tst.kv...)
if got != tst.want {
t.Errorf("Header(%q) = %q, want %q", tst.kv, got, tst.want)
}
}
}

View file

@ -1,90 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"time"
"golang.org/x/net/context"
)
// A user defined call stub.
type APICall func(context.Context, CallSettings) error
// Invoke calls the given APICall,
// performing retries as specified by opts, if any.
func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
var settings CallSettings
for _, opt := range opts {
opt.Resolve(&settings)
}
return invoke(ctx, call, settings, Sleep)
}
// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
// If interrupted, Sleep returns ctx.Err().
func Sleep(ctx context.Context, d time.Duration) error {
t := time.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
return ctx.Err()
case <-t.C:
return nil
}
}
type sleeper func(ctx context.Context, d time.Duration) error
// invoke implements Invoke, taking an additional sleeper argument for testing.
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
var retryer Retryer
for {
err := call(ctx, settings)
if err == nil {
return nil
}
if settings.Retry == nil {
return err
}
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
} else {
return err
}
}
if d, ok := retryer.Retry(err); !ok {
return err
} else if err = sp(ctx, d); err != nil {
return err
}
}
}

View file

@ -1,156 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"errors"
"testing"
"time"
"golang.org/x/net/context"
)
var canceledContext context.Context
func init() {
ctx, cancel := context.WithCancel(context.Background())
cancel()
canceledContext = ctx
}
// recordSleeper is a test implementation of sleeper.
type recordSleeper int
func (s *recordSleeper) sleep(ctx context.Context, _ time.Duration) error {
*s++
return ctx.Err()
}
type boolRetryer bool
func (r boolRetryer) Retry(err error) (time.Duration, bool) { return 0, bool(r) }
func TestInvokeSuccess(t *testing.T) {
apiCall := func(context.Context, CallSettings) error { return nil }
var sp recordSleeper
err := invoke(context.Background(), apiCall, CallSettings{}, sp.sleep)
if err != nil {
t.Errorf("found error %s, want nil", err)
}
if sp != 0 {
t.Errorf("slept %d times, should not have slept since the call succeeded", int(sp))
}
}
func TestInvokeNoRetry(t *testing.T) {
apiErr := errors.New("foo error")
apiCall := func(context.Context, CallSettings) error { return apiErr }
var sp recordSleeper
err := invoke(context.Background(), apiCall, CallSettings{}, sp.sleep)
if err != apiErr {
t.Errorf("found error %s, want %s", err, apiErr)
}
if sp != 0 {
t.Errorf("slept %d times, should not have slept since retry is not specified", int(sp))
}
}
func TestInvokeNilRetry(t *testing.T) {
apiErr := errors.New("foo error")
apiCall := func(context.Context, CallSettings) error { return apiErr }
var settings CallSettings
WithRetry(func() Retryer { return nil }).Resolve(&settings)
var sp recordSleeper
err := invoke(context.Background(), apiCall, settings, sp.sleep)
if err != apiErr {
t.Errorf("found error %s, want %s", err, apiErr)
}
if sp != 0 {
t.Errorf("slept %d times, should not have slept since retry is not specified", int(sp))
}
}
func TestInvokeNeverRetry(t *testing.T) {
apiErr := errors.New("foo error")
apiCall := func(context.Context, CallSettings) error { return apiErr }
var settings CallSettings
WithRetry(func() Retryer { return boolRetryer(false) }).Resolve(&settings)
var sp recordSleeper
err := invoke(context.Background(), apiCall, settings, sp.sleep)
if err != apiErr {
t.Errorf("found error %s, want %s", err, apiErr)
}
if sp != 0 {
t.Errorf("slept %d times, should not have slept since retry is not specified", int(sp))
}
}
func TestInvokeRetry(t *testing.T) {
const target = 3
retryNum := 0
apiErr := errors.New("foo error")
apiCall := func(context.Context, CallSettings) error {
retryNum++
if retryNum < target {
return apiErr
}
return nil
}
var settings CallSettings
WithRetry(func() Retryer { return boolRetryer(true) }).Resolve(&settings)
var sp recordSleeper
err := invoke(context.Background(), apiCall, settings, sp.sleep)
if err != nil {
t.Errorf("found error %s, want nil, call should have succeeded after %d tries", err, target)
}
if sp != target-1 {
t.Errorf("retried %d times, want %d", int(sp), int(target-1))
}
}
func TestInvokeRetryTimeout(t *testing.T) {
apiErr := errors.New("foo error")
apiCall := func(context.Context, CallSettings) error { return apiErr }
var settings CallSettings
WithRetry(func() Retryer { return boolRetryer(true) }).Resolve(&settings)
var sp recordSleeper
err := invoke(canceledContext, apiCall, settings, sp.sleep)
if err != context.Canceled {
t.Errorf("found error %s, want %s", err, context.Canceled)
}
}

View file

@ -1,176 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"errors"
"fmt"
"strings"
)
type matcher interface {
match([]string) (int, error)
String() string
}
type segment struct {
matcher
name string
}
type labelMatcher string
func (ls labelMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, fmt.Errorf("expected %s but no more segments found", ls)
}
if segments[0] != string(ls) {
return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
}
return 1, nil
}
func (ls labelMatcher) String() string {
return string(ls)
}
type wildcardMatcher int
func (wm wildcardMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, errors.New("no more segments found")
}
return 1, nil
}
func (wm wildcardMatcher) String() string {
return "*"
}
type pathWildcardMatcher int
func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
length := len(segments) - int(pwm)
if length <= 0 {
return 0, errors.New("not sufficient segments are supplied for path wildcard")
}
return length, nil
}
func (pwm pathWildcardMatcher) String() string {
return "**"
}
type ParseError struct {
Pos int
Template string
Message string
}
func (pe ParseError) Error() string {
return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
}
// PathTemplate manages the template to build and match with paths used
// by API services. It holds a template and variable names in it, and
// it can extract matched patterns from a path string or build a path
// string from a binding.
//
// See http.proto in github.com/googleapis/googleapis/ for the details of
// the template syntax.
type PathTemplate struct {
segments []segment
}
// NewPathTemplate parses a path template, and returns a PathTemplate
// instance if successful.
func NewPathTemplate(template string) (*PathTemplate, error) {
return parsePathTemplate(template)
}
// MustCompilePathTemplate is like NewPathTemplate but panics if the
// expression cannot be parsed. It simplifies safe initialization of
// global variables holding compiled regular expressions.
func MustCompilePathTemplate(template string) *PathTemplate {
pt, err := NewPathTemplate(template)
if err != nil {
panic(err)
}
return pt
}
// Match attempts to match the given path with the template, and returns
// the mapping of the variable name to the matched pattern string.
func (pt *PathTemplate) Match(path string) (map[string]string, error) {
paths := strings.Split(path, "/")
values := map[string]string{}
for _, segment := range pt.segments {
length, err := segment.match(paths)
if err != nil {
return nil, err
}
if segment.name != "" {
value := strings.Join(paths[:length], "/")
if oldValue, ok := values[segment.name]; ok {
values[segment.name] = oldValue + "/" + value
} else {
values[segment.name] = value
}
}
paths = paths[length:]
}
if len(paths) != 0 {
return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
}
return values, nil
}
// Render creates a path string from its template and the binding from
// the variable name to the value.
func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
result := make([]string, 0, len(pt.segments))
var lastVariableName string
for _, segment := range pt.segments {
name := segment.name
if lastVariableName != "" && name == lastVariableName {
continue
}
lastVariableName = name
if name == "" {
result = append(result, segment.String())
} else if value, ok := binding[name]; ok {
result = append(result, value)
} else {
return "", fmt.Errorf("%s is not found", name)
}
}
built := strings.Join(result, "/")
return built, nil
}

View file

@ -1,227 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"fmt"
"io"
"strings"
)
// This parser follows the syntax of path templates, from
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
// The differences are that there is no custom verb, we allow the initial slash
// to be absent, and that we are not strict as
// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
// literals.
type pathTemplateParser struct {
r *strings.Reader
runeCount int // the number of the current rune in the original string
nextVar int // the number to use for the next unnamed variable
seenName map[string]bool // names we've seen already
seenPathWildcard bool // have we seen "**" already?
}
func parsePathTemplate(template string) (pt *PathTemplate, err error) {
p := &pathTemplateParser{
r: strings.NewReader(template),
seenName: map[string]bool{},
}
// Handle panics with strings like errors.
// See pathTemplateParser.error, below.
defer func() {
if x := recover(); x != nil {
errmsg, ok := x.(errString)
if !ok {
panic(x)
}
pt = nil
err = ParseError{p.runeCount, template, string(errmsg)}
}
}()
segs := p.template()
// If there is a path wildcard, set its length. We can't do this
// until we know how many segments we've got all together.
for i, seg := range segs {
if _, ok := seg.matcher.(pathWildcardMatcher); ok {
segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
break
}
}
return &PathTemplate{segments: segs}, nil
}
// Used to indicate errors "thrown" by this parser. We don't use string because
// many parts of the standard library panic with strings.
type errString string
// Terminates parsing immediately with an error.
func (p *pathTemplateParser) error(msg string) {
panic(errString(msg))
}
// Template = [ "/" ] Segments
func (p *pathTemplateParser) template() []segment {
var segs []segment
if p.consume('/') {
// Initial '/' needs an initial empty matcher.
segs = append(segs, segment{matcher: labelMatcher("")})
}
return append(segs, p.segments("")...)
}
// Segments = Segment { "/" Segment }
func (p *pathTemplateParser) segments(name string) []segment {
var segs []segment
for {
subsegs := p.segment(name)
segs = append(segs, subsegs...)
if !p.consume('/') {
break
}
}
return segs
}
// Segment = "*" | "**" | LITERAL | Variable
func (p *pathTemplateParser) segment(name string) []segment {
if p.consume('*') {
if name == "" {
name = fmt.Sprintf("$%d", p.nextVar)
p.nextVar++
}
if p.consume('*') {
if p.seenPathWildcard {
p.error("multiple '**' disallowed")
}
p.seenPathWildcard = true
// We'll change 0 to the right number at the end.
return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
}
return []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if p.consume('{') {
if name != "" {
p.error("recursive named bindings are not allowed")
}
return p.variable()
}
return []segment{{name: name, matcher: labelMatcher(p.literal())}}
}
// Variable = "{" FieldPath [ "=" Segments ] "}"
// "{" is already consumed.
func (p *pathTemplateParser) variable() []segment {
// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
name := p.literal()
if p.seenName[name] {
p.error(name + " appears multiple times")
}
p.seenName[name] = true
var segs []segment
if p.consume('=') {
segs = p.segments(name)
} else {
// "{var}" is equivalent to "{var=*}"
segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if !p.consume('}') {
p.error("expected '}'")
}
return segs
}
// A literal is any sequence of characters other than a few special ones.
// The list of stop characters is not quite the same as in the template RFC.
func (p *pathTemplateParser) literal() string {
lit := p.consumeUntil("/*}{=")
if lit == "" {
p.error("empty literal")
}
return lit
}
// Read runes until EOF or one of the runes in stopRunes is encountered.
// If the latter, unread the stop rune. Return the accumulated runes as a string.
func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
var runes []rune
for {
r, ok := p.readRune()
if !ok {
break
}
if strings.IndexRune(stopRunes, r) >= 0 {
p.unreadRune()
break
}
runes = append(runes, r)
}
return string(runes)
}
// If the next rune is r, consume it and return true.
// Otherwise, leave the input unchanged and return false.
func (p *pathTemplateParser) consume(r rune) bool {
rr, ok := p.readRune()
if !ok {
return false
}
if r == rr {
return true
}
p.unreadRune()
return false
}
// Read the next rune from the input. Return it.
// The second return value is false at EOF.
func (p *pathTemplateParser) readRune() (rune, bool) {
r, _, err := p.r.ReadRune()
if err == io.EOF {
return r, false
}
if err != nil {
p.error(err.Error())
}
p.runeCount++
return r, true
}
// Put the last rune that was read back on the input.
func (p *pathTemplateParser) unreadRune() {
if err := p.r.UnreadRune(); err != nil {
p.error(err.Error())
}
p.runeCount--
}
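To make the parser above concrete, here is a minimal usage sketch of the path-template API it feeds; it assumes the vendored package is imported as gax from github.com/googleapis/gax-go, and the template and path values are taken from the test cases that follow:

```go
package main

import (
	"fmt"
	"log"

	gax "github.com/googleapis/gax-go"
)

func main() {
	// Parse a template with a named binding and a positional wildcard.
	pt, err := gax.NewPathTemplate("buckets/{foo}/objects/*")
	if err != nil {
		log.Fatal(err)
	}

	// Match extracts the bound values from a concrete path.
	values, err := pt.Match("buckets/foo/objects/bar")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(values) // e.g. map[$0:bar foo:foo]

	// Render is the inverse: it rebuilds a path from a value map.
	path, err := pt.Render(map[string]string{"$0": "bar", "foo": "foo"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(path) // buckets/foo/objects/bar
}
```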

View file

@@ -1,211 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import "testing"
func TestPathTemplateMatchRender(t *testing.T) {
testCases := []struct {
message string
template string
path string
values map[string]string
}{
{
"base",
"buckets/*/*/objects/*",
"buckets/f/o/objects/bar",
map[string]string{"$0": "f", "$1": "o", "$2": "bar"},
},
{
"path wildcards",
"bar/**/foo/*",
"bar/foo/foo/foo/bar",
map[string]string{"$0": "foo/foo", "$1": "bar"},
},
{
"named binding",
"buckets/{foo}/objects/*",
"buckets/foo/objects/bar",
map[string]string{"$0": "bar", "foo": "foo"},
},
{
"named binding with colon",
"buckets/{foo}/objects/*",
"buckets/foo:boo/objects/bar",
map[string]string{"$0": "bar", "foo": "foo:boo"},
},
{
"named binding with complex patterns",
"buckets/{foo=x/*/y/**}/objects/*",
"buckets/x/foo/y/bar/baz/objects/quox",
map[string]string{"$0": "quox", "foo": "x/foo/y/bar/baz"},
},
{
"starts with slash",
"/foo/*",
"/foo/bar",
map[string]string{"$0": "bar"},
},
}
for _, testCase := range testCases {
pt, err := NewPathTemplate(testCase.template)
if err != nil {
t.Errorf("[%s] Failed to parse template %s: %v", testCase.message, testCase.template, err)
continue
}
values, err := pt.Match(testCase.path)
if err != nil {
t.Errorf("[%s] PathTemplate '%s' failed to match with '%s', %v", testCase.message, testCase.template, testCase.path, err)
continue
}
for key, expected := range testCase.values {
actual, ok := values[key]
if !ok {
t.Errorf("[%s] The matched data misses the value for %s", testCase.message, key)
continue
}
delete(values, key)
if actual != expected {
t.Errorf("[%s] Failed to match: value for '%s' is expected '%s' but is actually '%s'", testCase.message, key, expected, actual)
}
}
if len(values) != 0 {
t.Errorf("[%s] The matched data has unexpected keys: %v", testCase.message, values)
}
built, err := pt.Render(testCase.values)
if err != nil || built != testCase.path {
t.Errorf("[%s] Built path '%s' is different from the expected '%s', %v", testCase.message, built, testCase.path, err)
}
}
}
func TestPathTemplateMatchFailure(t *testing.T) {
testCases := []struct {
message string
template string
path string
}{
{
"too many paths",
"buckets/*/*/objects/*",
"buckets/f/o/o/objects/bar",
},
{
"missing last path",
"buckets/*/*/objects/*",
"buckets/f/o/objects",
},
{
"too many paths at end",
"buckets/*/*/objects/*",
"buckets/f/o/objects/too/long",
},
}
for _, testCase := range testCases {
pt, err := NewPathTemplate(testCase.template)
if err != nil {
t.Errorf("[%s] Failed to parse path %s: %v", testCase.message, testCase.template, err)
continue
}
if values, err := pt.Match(testCase.path); err == nil {
t.Errorf("[%s] PathTemplate %s doesn't expect to match %s, but succeeded somehow. Match result: %v", testCase.message, testCase.template, testCase.path, values)
}
}
}
func TestPathTemplateRenderTooManyValues(t *testing.T) {
// Test cases where Render() succeeds but Match() doesn't return the same map.
testCases := []struct {
message string
template string
values map[string]string
expected string
}{
{
"too many",
"bar/*/foo/*",
map[string]string{"$0": "_1", "$1": "_2", "$2": "_3"},
"bar/_1/foo/_2",
},
}
for _, testCase := range testCases {
pt, err := NewPathTemplate(testCase.template)
if err != nil {
t.Errorf("[%s] Failed to parse template %s (error %v)", testCase.message, testCase.template, err)
continue
}
if result, err := pt.Render(testCase.values); err != nil || result != testCase.expected {
t.Errorf("[%s] Failed to build the path (expected '%s' but returned '%s'", testCase.message, testCase.expected, result)
}
}
}
func TestPathTemplateParseErrors(t *testing.T) {
testCases := []struct {
message string
template string
}{
{
"multiple path wildcards",
"foo/**/bar/**",
},
{
"recursive named bindings",
"foo/{foo=foo/{bar}/baz/*}/baz/*",
},
{
"complicated multiple path wildcards patterns",
"foo/{foo=foo/**/bar/*}/baz/**",
},
{
"consective slashes",
"foo//bar",
},
{
"invalid variable pattern",
"foo/{foo=foo/*/}bar",
},
{
"same name multiple times",
"foo/{foo}/bar/{foo}",
},
{
"empty string after '='",
"foo/{foo=}/bar",
},
}
for _, testCase := range testCases {
if pt, err := NewPathTemplate(testCase.template); err == nil {
t.Errorf("[%s] Template '%s' should fail to be parsed, but succeeded and returned %+v", testCase.message, testCase.template, pt)
}
}
}

View file

@@ -49,6 +49,8 @@ Usage of easyjson:
process the whole package instead of just the given file
-snake_case
use snake_case names instead of CamelCase by default
-lower_camel_case
use lowerCamelCase instead of CamelCase by default
-stubs
only generate stubs for marshaler/unmarshaler funcs
```
@@ -60,7 +62,7 @@ generated. For example:
```go
//easyjson:json
struct A{}
type A struct {}
```
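For context, a minimal sketch of how the corrected directive and the new -lower_camel_case flag might be used together; the file name example.go and the UserName field are illustrative assumptions, not part of the easyjson docs:

```go
package example

//go:generate easyjson -lower_camel_case example.go

//easyjson:json
type A struct {
	UserName string // emitted as "userName" when -lower_camel_case is set
}
```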
Additional option notes:

View file

@@ -1,8 +1,8 @@
language: go
go:
- 1.6
- 1.7
- 1.8
- tip
install:

View file

@@ -4,7 +4,7 @@
[![CircleCI](https://circleci.com/gh/openzipkin/zipkin-go-opentracing.svg?style=shield)](https://circleci.com/gh/openzipkin/zipkin-go-opentracing)
[![GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing?status.svg)](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing)
[![Go Report Card](https://goreportcard.com/badge/github.com/openzipkin/zipkin-go-opentracing)](https://goreportcard.com/report/github.com/openzipkin/zipkin-go-opentracing)
[![Sourcegraph](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing/-/badge.svg)](https://sourcegraph.com/github.com/openzipkin/zipkin-go-opentracing?badge)
[OpenTracing](http://opentracing.io) Tracer implementation for [Zipkin](http://zipkin.io) in Go.

View file

@@ -134,7 +134,7 @@ const string HTTP_METHOD = "http.method"
* "/resource/{resource_id}". In systems where only equals queries are available, searching for
* http/path=/resource won't match if the actual request was /resource/abcd-ff.
*
* Historical note: This was commonly expressed as "http.uri" in zipkin, eventhough it was most
* Historical note: This was commonly expressed as "http.uri" in zipkin, even though it was most
* often just a path.
*/
const string HTTP_PATH = "http.path"
@@ -286,7 +286,7 @@ struct Annotation {
* Microseconds from epoch.
*
* This value should use the most precise value possible. For example,
* gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
* gettimeofday or multiplying currentTimeMillis by 1000.
*/
1: i64 timestamp
/**
@@ -420,7 +420,7 @@ struct Span {
* precise value possible. For example, gettimeofday or syncing nanoTime
* against a tick of currentTimeMillis.
*
* For compatibilty with instrumentation that precede this field, collectors
* For compatibility with instrumentation that precede this field, collectors
* or span stores can derive this via Annotation.timestamp.
* For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
*

View file

@@ -35,8 +35,14 @@ type HTTPCollector struct {
shutdown chan error
sendMutex *sync.Mutex
batchMutex *sync.Mutex
reqCallback RequestCallback
}
// RequestCallback receives the initialized request from the Collector before
// sending it over the wire. This allows one to plug in additional headers or
// do other customization.
type RequestCallback func(*http.Request)
// HTTPOption sets a parameter for the HttpCollector
type HTTPOption func(c *HTTPCollector)
@@ -71,6 +77,17 @@ func HTTPBatchInterval(d time.Duration) HTTPOption {
return func(c *HTTPCollector) { c.batchInterval = d }
}
// HTTPClient sets a custom http client to use.
func HTTPClient(client *http.Client) HTTPOption {
return func(c *HTTPCollector) { c.client = client }
}
// HTTPRequestCallback registers a callback function to adjust the collector
// *http.Request before it sends the request to Zipkin.
func HTTPRequestCallback(rc RequestCallback) HTTPOption {
return func(c *HTTPCollector) { c.reqCallback = rc }
}
// NewHTTPCollector returns a new HTTP-backend Collector. url should be an HTTP
// URL that accepts POST requests. timeout is passed to the http client. queueSize
// controls the maximum size of the async queue buffer. The logger is used to log errors,
@@ -194,6 +211,9 @@ func (c *HTTPCollector) send() error {
return err
}
req.Header.Set("Content-Type", "application/x-thrift")
if c.reqCallback != nil {
c.reqCallback(req)
}
if _, err = c.client.Do(req); err != nil {
c.logger.Log("err", err.Error())
return err
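For reference, a minimal sketch of wiring the new collector options together; the package alias zipkintracer, the endpoint URL, and the Authorization header are assumptions for illustration, not part of the vendored change:

```go
package main

import (
	"log"
	"net/http"
	"time"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	collector, err := zipkintracer.NewHTTPCollector(
		"http://localhost:9411/api/v1/spans", // hypothetical Zipkin endpoint
		// Use a custom http.Client, e.g. to set a request timeout.
		zipkintracer.HTTPClient(&http.Client{Timeout: 5 * time.Second}),
		// Adjust every outgoing request before it is sent, e.g. add a header.
		zipkintracer.HTTPRequestCallback(func(req *http.Request) {
			req.Header.Set("Authorization", "Bearer <token>") // placeholder value
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer collector.Close()
}
```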

Some files were not shown because too many files have changed in this diff.