Update vendor libraries except client-go, apimachinery and ugorji/go (#1197)

This fix updates vendor libraries except client-go, apimachinery
and ugorji/go, as github.com/ugorji/go/codec is causing compatibility issues.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
This commit is contained in:
Yong Tang 2017-11-03 00:20:15 -07:00 committed by Miek Gieben
parent af6086d653
commit 1fc0c16968
370 changed files with 15091 additions and 5902 deletions

60
Gopkg.lock generated
View file

@ -16,8 +16,8 @@
[[projects]] [[projects]]
name = "github.com/Shopify/sarama" name = "github.com/Shopify/sarama"
packages = ["."] packages = ["."]
revision = "c01858abb625b73a3af51d0798e4ad42c8147093" revision = "bbdbe644099b7fdc8327d5cc69c030945188b2e9"
version = "v1.12.0" version = "v1.13.0"
[[projects]] [[projects]]
name = "github.com/apache/thrift" name = "github.com/apache/thrift"
@ -40,8 +40,8 @@
[[projects]] [[projects]]
name = "github.com/coreos/etcd" name = "github.com/coreos/etcd"
packages = ["client","pkg/pathutil","pkg/srv","pkg/types","version"] packages = ["client","pkg/pathutil","pkg/srv","pkg/types","version"]
revision = "9d43462d174c664f5edf313dec0de31e1ef4ed47" revision = "f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b"
version = "v3.2.6" version = "v3.2.9"
[[projects]] [[projects]]
name = "github.com/coreos/go-semver" name = "github.com/coreos/go-semver"
@ -59,7 +59,7 @@
branch = "master" branch = "master"
name = "github.com/dnstap/golang-dnstap" name = "github.com/dnstap/golang-dnstap"
packages = ["."] packages = ["."]
revision = "c32b266c26b040b00f522a0183527a9eb2a1cf8b" revision = "2cf77a2b5e11ac8d0ba3892772ac8e1f7b528344"
[[projects]] [[projects]]
name = "github.com/docker/distribution" name = "github.com/docker/distribution"
@ -88,8 +88,8 @@
[[projects]] [[projects]]
name = "github.com/emicklei/go-restful" name = "github.com/emicklei/go-restful"
packages = [".","log"] packages = [".","log"]
revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594" revision = "5741799b275a3c4a5a9623a993576d7545cf7b5c"
version = "v1.2" version = "v2.4.0"
[[projects]] [[projects]]
name = "github.com/emicklei/go-restful-swagger12" name = "github.com/emicklei/go-restful-swagger12"
@ -143,13 +143,13 @@
branch = "master" branch = "master"
name = "github.com/go-openapi/loads" name = "github.com/go-openapi/loads"
packages = ["."] packages = ["."]
revision = "a80dea3052f00e5f032e860dd7355cd0cc67e24d" revision = "c3e1ca4c0b6160cac10aeef7e8b425cc95b9c820"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/go-openapi/spec" name = "github.com/go-openapi/spec"
packages = ["."] packages = ["."]
revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7" revision = "84b5bee7bcb76f3d17bcbaf421bac44bd5709ca6"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -166,8 +166,8 @@
[[projects]] [[projects]]
name = "github.com/gogo/protobuf" name = "github.com/gogo/protobuf"
packages = ["proto","sortkeys"] packages = ["proto","sortkeys"]
revision = "100ba4e885062801d56799d78530b73b178a78f3" revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
version = "v0.4" version = "v0.5"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -178,8 +178,8 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto","ptypes/any"] packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e" revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -197,7 +197,7 @@
branch = "master" branch = "master"
name = "github.com/grpc-ecosystem/grpc-opentracing" name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"] packages = ["go/otgrpc"]
revision = "6c130eed1e297e1aa4d415a50c90d0c81c52677e" revision = "a570af39704b9f3d4bb530d83192a99ea6337d5a"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -214,14 +214,14 @@
[[projects]] [[projects]]
name = "github.com/imdario/mergo" name = "github.com/imdario/mergo"
packages = ["."] packages = ["."]
revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc" revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874"
version = "0.2.2" version = "0.2.4"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/juju/ratelimit" name = "github.com/juju/ratelimit"
packages = ["."] packages = ["."]
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342" revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -233,7 +233,7 @@
branch = "master" branch = "master"
name = "github.com/mailru/easyjson" name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"] packages = ["buffer","jlexer","jwriter"]
revision = "3b3bbef4e2d9a6f24d7ee73a894afbc064f0e057" revision = "4d347d79dea0067c945f374f990601decb08abb5"
[[projects]] [[projects]]
name = "github.com/matttproud/golang_protobuf_extensions" name = "github.com/matttproud/golang_protobuf_extensions"
@ -245,7 +245,7 @@
branch = "master" branch = "master"
name = "github.com/mitchellh/mapstructure" name = "github.com/mitchellh/mapstructure"
packages = ["."] packages = ["."]
revision = "d0303fe809921458f417bcf828397a65db30a7e4" revision = "06020f85339e21b2478f756a78e295255ffa4d6a"
[[projects]] [[projects]]
name = "github.com/opentracing/opentracing-go" name = "github.com/opentracing/opentracing-go"
@ -262,8 +262,8 @@
[[projects]] [[projects]]
name = "github.com/pierrec/lz4" name = "github.com/pierrec/lz4"
packages = ["."] packages = ["."]
revision = "5c9560bfa9ace2bf86080bf40d46b34ae44604df" revision = "08c27939df1bd95e881e2c2367a749964ad1fceb"
version = "v1.0" version = "v1.0.1"
[[projects]] [[projects]]
name = "github.com/pierrec/xxHash" name = "github.com/pierrec/xxHash"
@ -287,13 +287,13 @@
branch = "master" branch = "master"
name = "github.com/prometheus/common" name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"] packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
revision = "61f87aac8082fa8c3c5655c7608d7478d46ac2ad" revision = "1bab55dd05dbff384524a6a1c99006d9eb5f139b"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/prometheus/procfs" name = "github.com/prometheus/procfs"
packages = [".","xfs"] packages = [".","xfs"]
revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2" revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -317,31 +317,31 @@
branch = "master" branch = "master"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["ssh/terminal"] packages = ["ssh/terminal"]
revision = "81e90905daefcd6fd217b62423c0908922eadb30" revision = "bd6f299fb381e4c3393d1c4b1f0b94f5e77650c8"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix","windows"] packages = ["unix","windows"]
revision = "2d6f6f883a06fc0d5f4b14a81e4c28705ea64c15" revision = "46eaec7899b1dd945c25db17c78ba3f0c58a6613"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm","width"] packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm","width"]
revision = "ac87088df8ef557f1e32cd00ed0b6fbc3f7ddafb" revision = "88f656faf3f37f690df1a32515b479415e1a6769"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
revision = "ee236bd376b077c7a89f260c026c4735b195e459" revision = "11c7f9e547da6db876260ce49ea7536985904c9b"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"] packages = [".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","stats","status","tap","transport"]
revision = "b3ddf786825de56a4178401b7e174ee332173b66" revision = "61d37c5d657a47e4404fd6823bd598341a2595de"
version = "v1.5.2" version = "v1.7.1"
[[projects]] [[projects]]
name = "gopkg.in/inf.v0" name = "gopkg.in/inf.v0"

View file

@ -1,7 +1,8 @@
language: go language: go
go: go:
- 1.7.3 - 1.7.x
- 1.8 - 1.8.x
- 1.9.x
env: env:
global: global:
@ -12,7 +13,8 @@ env:
- DEBUG=true - DEBUG=true
matrix: matrix:
- KAFKA_VERSION=0.9.0.1 - KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.0 - KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.1
before_install: before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} - export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}

View file

@ -1,5 +1,34 @@
# Changelog # Changelog
#### Version 1.13.0 (2017-10-04)
New Features:
- Support for FetchRequest version 3
([#905](https://github.com/Shopify/sarama/pull/905)).
- Permit setting version on mock FetchResponses
([#939](https://github.com/Shopify/sarama/pull/939)).
- Add a configuration option to support storing only minimal metadata for
extremely large clusters
([#937](https://github.com/Shopify/sarama/pull/937)).
- Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
([#932](https://github.com/Shopify/sarama/pull/932)).
Improvements:
- Provide the block-level timestamp when consuming compressed messages
([#885](https://github.com/Shopify/sarama/issues/885)).
- `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
by the broker, which can be meaningful
([#930](https://github.com/Shopify/sarama/pull/930)).
- Use a `Ticker` to reduce consumer timer overhead at the cost of higher
variance in the actual timeout
([#933](https://github.com/Shopify/sarama/pull/933)).
Bug Fixes:
- Gracefully handle messages with negative timestamps
([#907](https://github.com/Shopify/sarama/pull/907)).
- Raise a proper error when encountering an unknown message version
([#940](https://github.com/Shopify/sarama/pull/940)).
#### Version 1.12.0 (2017-05-08) #### Version 1.12.0 (2017-05-08)
New Features: New Features:

View file

@ -20,7 +20,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support grace period for older releases. This means we currently officially support
Go 1.8 and 1.7, and Kafka 0.10 and 0.9, although older releases are Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
still likely to work. still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service. Sarama follows semantic versioning and provides API stability via the gopkg.in service.

View file

@ -141,6 +141,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
} }
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics // do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata() err := client.RefreshMetadata()
switch err { switch err {
@ -154,6 +155,7 @@ func NewClient(addrs []string, conf *Config) (Client, error) {
_ = client.Close() _ = client.Close()
return nil, err return nil, err
} }
}
go withRecover(client.backgroundMetadataUpdater) go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client") Logger.Println("Successfully initialized new client")
@ -297,7 +299,7 @@ func (client *client) Replicas(topic string, partitionID int32) ([]int32, error)
if metadata.Err == ErrReplicaNotAvailable { if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err return nil, metadata.Err
} }
return dupeAndSort(metadata.Replicas), nil return dupInt32Slice(metadata.Replicas), nil
} }
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
@ -322,7 +324,7 @@ func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32,
if metadata.Err == ErrReplicaNotAvailable { if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err return nil, metadata.Err
} }
return dupeAndSort(metadata.Isr), nil return dupInt32Slice(metadata.Isr), nil
} }
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
@ -605,7 +607,20 @@ func (client *client) backgroundMetadataUpdater() {
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
if err := client.RefreshMetadata(); err != nil { topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
Logger.Println("Client background metadata topic load:", err)
break
} else if len(specificTopics) == 0 {
Logger.Println("Client background metadata update: no specific topics to update")
break
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
Logger.Println("Client background metadata update:", err) Logger.Println("Client background metadata update:", err)
} }
case <-client.closer: case <-client.closer:

View file

@ -188,12 +188,12 @@ func TestClientMetadata(t *testing.T) {
replicas, err = client.Replicas("my_topic", 0) replicas, err = client.Replicas("my_topic", 0)
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} else if replicas[0] != 1 { } else if replicas[0] != 3 {
t.Error("Incorrect (or unsorted) replica") t.Error("Incorrect (or sorted) replica")
} else if replicas[1] != 3 { } else if replicas[1] != 1 {
t.Error("Incorrect (or unsorted) replica") t.Error("Incorrect (or sorted) replica")
} else if replicas[2] != 5 { } else if replicas[2] != 5 {
t.Error("Incorrect (or unsorted) replica") t.Error("Incorrect (or sorted) replica")
} }
isr, err = client.InSyncReplicas("my_topic", 0) isr, err = client.InSyncReplicas("my_topic", 0)
@ -201,10 +201,10 @@ func TestClientMetadata(t *testing.T) {
t.Error(err) t.Error(err)
} else if len(isr) != 2 { } else if len(isr) != 2 {
t.Error("Client returned incorrect ISRs for partition:", isr) t.Error("Client returned incorrect ISRs for partition:", isr)
} else if isr[0] != 1 { } else if isr[0] != 5 {
t.Error("Incorrect (or unsorted) ISR:", isr) t.Error("Incorrect (or sorted) ISR:", isr)
} else if isr[1] != 5 { } else if isr[1] != 1 {
t.Error("Incorrect (or unsorted) ISR:", isr) t.Error("Incorrect (or sorted) ISR:", isr)
} }
leader.Close() leader.Close()

View file

@ -72,6 +72,12 @@ type Config struct {
// Defaults to 10 minutes. Set to 0 to disable. Similar to // Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version. // `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
} }
// Producer is the namespace for configuration related to producing messages, // Producer is the namespace for configuration related to producing messages,
@ -99,7 +105,10 @@ type Config struct {
Partitioner PartitionerConstructor Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true, // Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. // you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct { Return struct {
// If enabled, successfully delivered messages will be returned on the // If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled). // Successes channel (default disabled).
@ -187,11 +196,23 @@ type Config struct {
// Equivalent to the JVM's `fetch.wait.max.ms`. // Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to process // The maximum amount of time the consumer expects a message takes to
// for the user. If writing to the Messages channel takes longer than this, // process for the user. If writing to the Messages channel takes longer
// that partition will stop fetching more messages until it can proceed again. // than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is // Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms. // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true, // Return specifies what channels will be populated. If they are set to true,
@ -260,6 +281,7 @@ func NewConfig() *Config {
c.Metadata.Retry.Max = 3 c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000 c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal c.Producer.RequiredAcks = WaitForLocal

View file

@ -14,7 +14,8 @@ type ConsumerMessage struct {
Topic string Topic string
Partition int32 Partition int32
Offset int64 Offset int64
Timestamp time.Time // only set if kafka is version 0.10+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
} }
// ConsumerError is what is provided to the user when an error occurs. // ConsumerError is what is provided to the user when an error occurs.
@ -246,9 +247,9 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// PartitionConsumer // PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close() // PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// or AsyncClose() on a PartitionConsumer to avoid leaks, it will not be garbage-collected automatically // AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// when it passes out of scope. // of scope.
// //
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range // The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported // loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
@ -257,19 +258,25 @@ func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set // By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement // your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches. // or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface { type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// return immediately, after which you should wait until the 'messages' and // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// 'errors' channel are drained. It is required to call this function, or // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// Close before a consumer object passes out of scope, as it will otherwise // this before calling Close on the underlying client.
// leak memory. You must call this before calling Close on the underlying client.
AsyncClose() AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It is required to // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// call this function (or AsyncClose) before a consumer object passes out of // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// scope, as it will otherwise leak memory. You must call this before calling // the Messages channel when this function is called, you will be competing with Close for messages; consider
// Close on the underlying client. // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error Close() error
// Messages returns the read channel for the messages that are returned by // Messages returns the read channel for the messages that are returned by
@ -433,25 +440,20 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
func (child *partitionConsumer) responseFeeder() { func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage var msgs []*ConsumerMessage
expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) msgSent := false
expireTimedOut := false
feederLoop: feederLoop:
for response := range child.feeder { for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response) msgs, child.responseResult = child.parseResponse(response)
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
for i, msg := range msgs { for i, msg := range msgs {
if !expiryTimer.Stop() && !expireTimedOut { messageSelect:
// expiryTimer was expired; clear out the waiting msg
<-expiryTimer.C
}
expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
expireTimedOut = false
select { select {
case child.messages <- msg: case child.messages <- msg:
case <-expiryTimer.C: msgSent = true
expireTimedOut = true case <-expiryTicker.C:
if !msgSent {
child.responseResult = errTimedOut child.responseResult = errTimedOut
child.broker.acks.Done() child.broker.acks.Done()
for _, msg = range msgs[i:] { for _, msg = range msgs[i:] {
@ -459,9 +461,16 @@ feederLoop:
} }
child.broker.input <- child child.broker.input <- child
continue feederLoop continue feederLoop
} else {
// current message has not been sent, return to select
// statement
msgSent = false
goto messageSelect
}
} }
} }
expiryTicker.Stop()
child.broker.acks.Done() child.broker.acks.Done()
} }
@ -526,6 +535,7 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
Value: msg.Msg.Value, Value: msg.Msg.Value,
Offset: offset, Offset: offset,
Timestamp: msg.Msg.Timestamp, Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
}) })
child.offset = offset + 1 child.offset = offset + 1
} else { } else {
@ -726,6 +736,10 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2 request.Version = 2
} }
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
for child := range bc.subscriptions { for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)

View file

@ -803,6 +803,48 @@ func TestConsumerOffsetOutOfRange(t *testing.T) {
broker0.Close() broker0.Close()
} }
func TestConsumerExpiryTicker(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse1 := &FetchResponse{}
for i := 1; i <= 8; i++ {
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
}
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 1),
"FetchRequest": NewMockSequence(fetchResponse1),
})
config := NewConfig()
config.ChannelBufferSize = 0
config.Consumer.MaxProcessingTime = 10 * time.Millisecond
master, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 through 8 are read
for i := 1; i <= 8; i++ {
assertMessageOffset(t, <-consumer.Messages(), int64(i))
time.Sleep(2 * time.Millisecond)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
if msg.Offset != expectedOffset { if msg.Offset != expectedOffset {
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)

View file

@ -2,6 +2,7 @@ package sarama
import ( import (
"encoding/binary" "encoding/binary"
"fmt"
"hash/crc32" "hash/crc32"
) )
@ -27,8 +28,9 @@ func (c *crc32Field) run(curOffset int, buf []byte) error {
func (c *crc32Field) check(curOffset int, buf []byte) error { func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { expected := binary.BigEndian.Uint32(buf[c.startOffset:])
return PacketDecodingError{"CRC didn't match"} if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
} }
return nil return nil

View file

@ -21,9 +21,13 @@ func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
return nil return nil
} }
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct { type FetchRequest struct {
MaxWaitTime int32 MaxWaitTime int32
MinBytes int32 MinBytes int32
MaxBytes int32
Version int16 Version int16
blocks map[string]map[int32]*fetchRequestBlock blocks map[string]map[int32]*fetchRequestBlock
} }
@ -32,6 +36,9 @@ func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime) pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes) pe.putInt32(r.MinBytes)
if r.Version == 3 {
pe.putInt32(r.MaxBytes)
}
err = pe.putArrayLength(len(r.blocks)) err = pe.putArrayLength(len(r.blocks))
if err != nil { if err != nil {
return err return err
@ -67,6 +74,11 @@ func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
if r.MinBytes, err = pd.getInt32(); err != nil { if r.MinBytes, err = pd.getInt32(); err != nil {
return err return err
} }
if r.Version == 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength() topicCount, err := pd.getArrayLength()
if err != nil { if err != nil {
return err return err
@ -114,6 +126,8 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
return V0_9_0_0 return V0_9_0_0
case 2: case 2:
return V0_10_0_0 return V0_10_0_0
case 3:
return V0_10_1_0
default: default:
return minVersion return minVersion
} }

View file

@ -45,7 +45,15 @@ func (m *Message) encode(pe packetEncoder) error {
pe.putInt8(attributes) pe.putInt8(attributes)
if m.Version >= 1 { if m.Version >= 1 {
pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond)) timestamp := int64(-1)
if !m.Timestamp.Before(time.Unix(0, 0)) {
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !m.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
}
pe.putInt64(timestamp)
} }
err := pe.putBytes(m.Key) err := pe.putBytes(m.Key)
@ -114,18 +122,30 @@ func (m *Message) decode(pd packetDecoder) (err error) {
return err return err
} }
if m.Version > 1 {
return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
}
attribute, err := pd.getInt8() attribute, err := pd.getInt8()
if err != nil { if err != nil {
return err return err
} }
m.Codec = CompressionCodec(attribute & compressionCodecMask) m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version >= 1 { if m.Version == 1 {
millis, err := pd.getInt64() millis, err := pd.getInt64()
if err != nil { if err != nil {
return err return err
} }
m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
// negative timestamps are invalid, in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
m.Timestamp = timestamp
} }
m.Key, err = pd.getBytes() m.Key, err = pd.getBytes()

View file

@ -2,6 +2,7 @@ package sarama
import ( import (
"runtime" "runtime"
"strings"
"testing" "testing"
"time" "time"
) )
@ -14,6 +15,21 @@ var (
0xFF, 0xFF, 0xFF, 0xFF, // key 0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value 0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV1Message = []byte{
204, 47, 121, 217, // CRC
0x01, // magic version byte
0x00, // attribute flags
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV2Message = []byte{
167, 236, 104, 3, // CRC
0x02, // magic version byte
0x00, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyGzipMessage = []byte{ emptyGzipMessage = []byte{
97, 79, 149, 90, //CRC 97, 79, 149, 90, //CRC
0x00, // magic version byte 0x00, // magic version byte
@ -91,7 +107,7 @@ func TestMessageEncoding(t *testing.T) {
message.Value = []byte{} message.Value = []byte{}
message.Codec = CompressionGZIP message.Codec = CompressionGZIP
if runtime.Version() == "go1.8" { if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
testEncodable(t, "empty gzip", &message, emptyGzipMessage18) testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
} else { } else {
testEncodable(t, "empty gzip", &message, emptyGzipMessage) testEncodable(t, "empty gzip", &message, emptyGzipMessage)
@ -179,3 +195,19 @@ func TestMessageDecodingBulkLZ4(t *testing.T) {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
} }
} }
func TestMessageDecodingVersion1(t *testing.T) {
message := Message{Version: 1}
testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
}
func TestMessageDecodingUnknownVersions(t *testing.T) {
message := Message{Version: 2}
err := decode(emptyV2Message, &message)
if err == nil {
t.Error("Decoding did not produce an error for an unknown magic byte")
}
if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
t.Error("Decoding an unknown magic byte produced an unknown error ", err)
}
}

View file

@ -180,6 +180,7 @@ type MockFetchResponse struct {
highWaterMarks map[string]map[int32]int64 highWaterMarks map[string]map[int32]int64
t TestReporter t TestReporter
batchSize int batchSize int
version int16
} }
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
@ -191,6 +192,11 @@ func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
} }
} }
func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
mfr.version = version
return mfr
}
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic] partitions := mfr.messages[topic]
if partitions == nil { if partitions == nil {
@ -218,7 +224,9 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest) fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{} res := &FetchResponse{
Version: mfr.version,
}
for topic, partitions := range fetchRequest.blocks { for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions { for partition, block := range partitions {
initialOffset := block.fetchOffset initialOffset := block.fetchOffset

View file

@ -151,6 +151,13 @@ type PartitionOffsetManager interface {
// message twice, and your processing should ideally be idempotent. // message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string) MarkOffset(offset int64, metadata string)
// ResetOffset resets to the provided offset, alongside a metadata string that
// represents the state of the partition consumer at that point in time. Reset
// acts as a counterpart to MarkOffset, the difference being that it allows to
// reset an offset to an earlier or smaller value, where MarkOffset only
// allows incrementing the offset. cf MarkOffset for more details.
ResetOffset(offset int64, metadata string)
// Errors returns a read channel of errors that occur during offset management, if // Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If // enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's // you want to implement any custom error handling, set your config's
@ -329,6 +336,17 @@ func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
} }
} }
func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset <= pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock() pom.lock.Lock()
defer pom.lock.Unlock() defer pom.lock.Unlock()

View file

@ -204,6 +204,70 @@ func TestPartitionOffsetManagerNextOffset(t *testing.T) {
safeClose(t, testClient) safeClose(t, testClient)
} }
func TestPartitionOffsetManagerResetOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
coordinator.Returns(ocResponse)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
testClient.Config().Consumer.Offsets.Retention = time.Hour
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
handler := func(req *request) (res encoder) {
if req.body.version() != 2 {
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
}
offsetCommitRequest := req.body.(*OffsetCommitRequest)
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
}
return ocResponse
}
coordinator.setHandler(handler)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerMarkOffset(t *testing.T) { func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t) om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")

View file

@ -3,7 +3,6 @@ package sarama
import ( import (
"bufio" "bufio"
"net" "net"
"sort"
) )
type none struct{} type none struct{}
@ -23,13 +22,11 @@ func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i] slice[i], slice[j] = slice[j], slice[i]
} }
func dupeAndSort(input []int32) []int32 { func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input)) ret := make([]int32, 0, len(input))
for _, val := range input { for _, val := range input {
ret = append(ret, val) ret = append(ret, val)
} }
sort.Sort(int32Slice(ret))
return ret return ret
} }

View file

@ -4,7 +4,7 @@ go_import_path: github.com/coreos/etcd
sudo: false sudo: false
go: go:
- 1.8.3 - 1.8.4
- tip - tip
notifications: notifications:

120
vendor/github.com/coreos/etcd/Makefile generated vendored Normal file
View file

@ -0,0 +1,120 @@
# run from repository root
TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
.PHONY: build
build:
GO_BUILD_FLAGS="-v" ./build
./bin/etcd --version
ETCDCTL_API=3 ./bin/etcdctl version
test:
$(info log-file: test-$(TEST_SUFFIX).log)
PASSES='fmt bom dep compile build unit' ./test 2>&1 | tee test-$(TEST_SUFFIX).log
! grep FAIL -A10 -B50 test-$(TEST_SUFFIX).log
test-all:
$(info log-file: test-all-$(TEST_SUFFIX).log)
RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' ./test 2>&1 | tee test-all-$(TEST_SUFFIX).log
! grep FAIL -A10 -B50 test-all-$(TEST_SUFFIX).log
test-proxy:
$(info log-file: test-proxy-$(TEST_SUFFIX).log)
PASSES='build grpcproxy' ./test 2>&1 | tee test-proxy-$(TEST_SUFFIX).log
! grep FAIL -A10 -B50 test-proxy-$(TEST_SUFFIX).log
test-coverage:
$(info log-file: test-coverage-$(TEST_SUFFIX).log)
COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee test-coverage-$(TEST_SUFFIX).log
$(shell curl -s https://codecov.io/bash >codecov)
chmod 700 ./codecov
./codecov -h
./codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1
# clean up failed tests, logs, dependencies
clean:
rm -f ./codecov
rm -f ./*.log
rm -f ./bin/Dockerfile-release
rm -rf ./bin/*.etcd
rm -rf ./gopath
rm -rf ./release
rm -f ./integration/127.0.0.1:* ./integration/localhost:*
rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*
# sync with Dockerfile-test, e2e/docker-dns/Dockerfile, e2e/docker-dns-srv/Dockerfile
_GO_VERSION = go1.8.4
ifdef GO_VERSION
_GO_VERSION = $(GO_VERSION)
endif
# build base container image for testing on Linux
docker-test-build:
docker build --tag gcr.io/etcd-development/etcd-test:$(_GO_VERSION) --file ./Dockerfile-test .
# e.g.
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd.json)" https://gcr.io
docker-test-push:
gcloud docker -- push gcr.io/etcd-development/etcd-test:$(_GO_VERSION)
docker-test-pull:
docker pull gcr.io/etcd-development/etcd-test:$(_GO_VERSION)
# compile etcd and etcdctl with Linux
docker-test-compile:
docker run \
--rm \
--volume=`pwd`/:/etcd \
gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
/bin/bash -c "cd /etcd && GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
# run tests inside container
docker-test:
$(info log-file: docker-test-$(TEST_SUFFIX).log)
docker run \
--rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd \
gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
/bin/bash -c "RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' ./test 2>&1 | tee docker-test-$(TEST_SUFFIX).log"
! grep FAIL -A10 -B50 docker-test-$(TEST_SUFFIX).log
docker-test-386:
$(info log-file: docker-test-386-$(TEST_SUFFIX).log)
docker run \
--rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd \
gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
/bin/bash -c "GOARCH=386 PASSES='build unit integration_e2e' ./test 2>&1 | tee docker-test-386-$(TEST_SUFFIX).log"
! grep FAIL -A10 -B50 docker-test-386-$(TEST_SUFFIX).log
docker-test-proxy:
$(info log-file: docker-test-proxy-$(TEST_SUFFIX).log)
docker run \
--rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd \
gcr.io/etcd-development/etcd-test:$(_GO_VERSION) \
/bin/bash -c "PASSES='build grpcproxy' ./test ./test 2>&1 | tee docker-test-proxy-$(TEST_SUFFIX).log"
! grep FAIL -A10 -B50 docker-test-proxy-$(TEST_SUFFIX).log
# build release container image with Linux
_ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
ifdef ETCD_VERSION
_ETCD_VERSION = $(ETCD_VERSION)
endif
docker-release-master-build: docker-test-compile
cp ./Dockerfile-release ./bin/Dockerfile-release
docker build \
--tag gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
--file ./bin/Dockerfile-release \
./bin
rm -f ./bin/Dockerfile-release
docker run \
--rm \
gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
/bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version"
docker-release-master-push:
gcloud docker -- push gcr.io/etcd-development/etcd:$(_ETCD_VERSION)

View file

@ -372,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
if err == context.Canceled || err == context.DeadlineExceeded { if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err return nil, nil, err
} }
if isOneShot { } else if resp.StatusCode/100 == 5 {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable: case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response // TODO: make sure this is a no leader response
@ -385,11 +380,17 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo
default: default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
} }
if isOneShot { err = cerr.Errors[0]
return nil, nil, cerr.Errors[0]
} }
if err != nil {
if !isOneShot {
continue continue
} }
c.Lock()
c.pinned = (k + 1) % leps
c.Unlock()
return nil, nil, err
}
if k != pinned { if k != pinned {
c.Lock() c.Lock()
c.pinned = k c.pinned = k

View file

@ -16,6 +16,7 @@ package client
import ( import (
"errors" "errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
@ -305,6 +306,8 @@ func TestHTTPClusterClientDo(t *testing.T) {
fakeURL := url.URL{} fakeURL := url.URL{}
tests := []struct { tests := []struct {
client *httpClusterClient client *httpClusterClient
ctx context.Context
wantCode int wantCode int
wantErr error wantErr error
wantPinned int wantPinned int
@ -395,10 +398,30 @@ func TestHTTPClusterClientDo(t *testing.T) {
wantCode: http.StatusTeapot, wantCode: http.StatusTeapot,
wantPinned: 1, wantPinned: 1,
}, },
// 500-level errors cause one shot Do to fallthrough to next endpoint
{
client: &httpClusterClient{
endpoints: []url.URL{fakeURL, fakeURL},
clientFactory: newStaticHTTPClientFactory(
[]staticHTTPResponse{
{resp: http.Response{StatusCode: http.StatusBadGateway}},
{resp: http.Response{StatusCode: http.StatusTeapot}},
},
),
rand: rand.New(rand.NewSource(0)),
},
ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
wantErr: fmt.Errorf("client: etcd member returns server error [Bad Gateway]"),
wantPinned: 1,
},
} }
for i, tt := range tests { for i, tt := range tests {
resp, _, err := tt.client.Do(context.Background(), nil) if tt.ctx == nil {
tt.ctx = context.Background()
}
resp, _, err := tt.client.Do(tt.ctx, nil)
if !reflect.DeepEqual(tt.wantErr, err) { if !reflect.DeepEqual(tt.wantErr, err) {
t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr) t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
continue continue
@ -407,11 +430,9 @@ func TestHTTPClusterClientDo(t *testing.T) {
if resp == nil { if resp == nil {
if tt.wantCode != 0 { if tt.wantCode != 0 {
t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode) t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
}
continue continue
} }
} else if resp.StatusCode != tt.wantCode {
if resp.StatusCode != tt.wantCode {
t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode) t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
continue continue
} }

View file

@ -1,5 +1,5 @@
hash: cee1f2629857e9c2384ad89ff6014db09498c9af53771e5144ad3a4b510ff00e hash: e18fa8fb6e4dc1d7eb3cd538c90b0927f26e1ab0b04cbdd209d2d5c3233b7c5b
updated: 2017-05-30T10:29:08.22609283-07:00 updated: 2017-10-05T07:34:38.051011-07:00
imports: imports:
- name: github.com/beorn7/perks - name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@ -108,7 +108,7 @@ imports:
- name: github.com/xiang90/probing - name: github.com/xiang90/probing
version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2 version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
- name: golang.org/x/crypto - name: golang.org/x/crypto
version: 1351f936d976c60a0a48d728281922cf63eafb8d version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
subpackages: subpackages:
- bcrypt - bcrypt
- blowfish - blowfish

View file

@ -78,7 +78,7 @@ import:
- package: github.com/grpc-ecosystem/go-grpc-prometheus - package: github.com/grpc-ecosystem/go-grpc-prometheus
version: v1.1 version: v1.1
- package: golang.org/x/crypto - package: golang.org/x/crypto
version: 1351f936d976c60a0a48d728281922cf63eafb8d version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
subpackages: subpackages:
- bcrypt - bcrypt
- blowfish - blowfish

View file

@ -26,7 +26,7 @@ import (
var ( var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with. // MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0" MinClusterVersion = "3.0.0"
Version = "3.2.6" Version = "3.2.9"
APIVersion = "unknown" APIVersion = "unknown"
// Git SHA Value will be set during build // Git SHA Value will be set during build

6
vendor/github.com/emicklei/go-restful/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,6 @@
language: go
go:
- 1.x
script: go test -v

View file

@ -1,27 +1,74 @@
Change history of go-restful Change history of go-restful
= =
2017-09-13
- added route condition functions using `.If(func)` in route building.
2017-02-16
- solved issue #304, make operation names unique
2017-01-30
[IMPORTANT] For swagger users, change your import statement to:
swagger "github.com/emicklei/go-restful-swagger12"
- moved swagger 1.2 code to go-restful-swagger12
- created TAG 2.0.0
2017-01-27
- remove defer request body close
- expose Dispatch for testing filters and Routefunctions
- swagger response model cannot be array
- created TAG 1.0.0
2016-12-22
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
2016-11-26
- Default change! now use CurlyRouter (was RouterJSR311)
- Default change! no more caching of request content
- Default change! do not recover from panics
2016-09-22
- fix the DefaultRequestContentType feature
2016-02-14
- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response
- add constructors for custom entity accessors for xml and json
2015-09-27 2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25 2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235) - fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14 2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) - changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters. - added support for custom EntityReaderWriters.
2015-08-06 2015-08-06
- add support for reading entities from compressed request content - add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body - use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI - add Description to Parameter for documentation in Swagger UI
2015-03-20 2015-03-20
- add configurable logging - add configurable logging
2015-03-18 2015-03-18
- if not specified, the Operation is derived from the Route function - if not specified, the Operation is derived from the Route function
2015-03-17 2015-03-17
- expose Parameter creation functions - expose Parameter creation functions
- make trace logger an interface - make trace logger an interface
- fix OPTIONSFilter - fix OPTIONSFilter
@ -30,21 +77,26 @@ Change history of go-restful
- add Notes to Route - add Notes to Route
2014-11-27 2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167) - (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12 2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI - (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10 2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI - (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31 2014-10-31
- (api change) ReturnsError -> Returns - (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder - (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs - fix swagger nested structs
- sort Swagger response messages by code - sort Swagger response messages by code
2014-10-23 2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger - (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter - fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS - (api add) Access-Control-Max-Age in CORS
@ -58,102 +110,117 @@ Change history of go-restful
- (api add) ParameterNamed for detailed documentation - (api add) ParameterNamed for detailed documentation
2014-04-16 2014-04-16
- (api add) expose constructor of Request for testing. - (api add) expose constructor of Request for testing.
2014-06-27 2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). - (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03 2014-07-03
- (api add) CORS can be configured with a list of allowed domains - (api add) CORS can be configured with a list of allowed domains
2014-03-12 2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26 2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17 2014-02-17
- (api change) renamed parameter constants (go-lint checks) - (api change) renamed parameter constants (go-lint checks)
2014-01-10 2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07 2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template. - (api change) Write* methods in Response now return the error or nil.
- fixed comparing Allowed headers in CORS (is now case-insensitive) - added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13 2013-11-13
- (api add) Response knows how many bytes are written to the response body.
- (api add) Response knows how many bytes are written to the response body.
2013-10-29 2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04 2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables - (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables
2013-09-12 2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
2013-08-05 2013-08-05
- add OPTIONS support - add OPTIONS support
- add CORS support - add CORS support
2013-08-27 2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead - fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15 2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08 2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has be extended to have a UI per container. - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- if panic is detected then a small stack trace is printed (thanks to runner-mei) - (api add) the swagger package has be extended to have a UI per container.
- (api add) WriteErrorString to Response - if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes: Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. - (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06 2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19 2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity - (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03 2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression - (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go - changed receiver names of type functions to be more idiomatic Go
2013-06-02 2013-06-02
- (optimize) Cache the RegExp compilation of Paths. - (optimize) Cache the RegExp compilation of Paths.
2013-05-22 2013-05-22
- (api add) Added support for request/response filter functions - (api add) Added support for request/response filter functions
2013-05-18 2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline) - (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user) - (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18> [2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits - See https://github.com/emicklei/go-restful/commits
2012-11-14 2012-11-14
- Initial commit - Initial commit

7
vendor/github.com/emicklei/go-restful/Makefile generated vendored Normal file
View file

@ -0,0 +1,7 @@
all: test
test:
go test -v .
ex:
cd examples && ls *.go | xargs go build -o /tmp/ignore

View file

@ -1,8 +1,13 @@
go-restful go-restful
========== ==========
package for building REST-style Web Services using Google Go package for building REST-style Web Services using Google Go
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource - GET = Retrieve a representation of a resource
@ -40,35 +45,31 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support - Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router: - Configurable router:
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but doest **not** accept) regular expressions (See RouterJSR311 which is used by default) - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter) - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) - Request API for reading structs from JSON/XML and accesing parameters (path,query,header)
- Response API for writing structs to JSON/XML and setting headers - Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level - Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes - Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints - Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads - Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter) - Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter) - Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package) - API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) - Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging - Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration - Customizable gzip/deflate readers and writers using CompressorProvider registration
### Resources ### Resources
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful) - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1) - [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
(c) 2012 - 2015, http://ernestmicklei.com. MIT License
Type ```git shortlog -s``` for a full list of contributors. Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.

View file

@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
import ( import (
"bufio"
"compress/gzip" "compress/gzip"
"compress/zlib" "compress/zlib"
"errors" "errors"
"io" "io"
"net"
"net/http" "net/http"
"strings" "strings"
) )
@ -69,6 +71,17 @@ func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor return nil == c.compressor
} }
// Hijack implements the Hijacker interface
// This is especially useful when combining Container.EnabledContentEncoding
// in combination with websockets (for instance gorilla/websocket)
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := c.writer.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
}
return hijacker.Hijack()
}
// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. // WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding) header := httpRequest.Header.Get(HEADER_AcceptEncoding)

View file

@ -94,7 +94,6 @@ func TestGzipDecompressRequestBody(t *testing.T) {
httpRequest.Header.Set("Content-Encoding", "gzip") httpRequest.Header.Set("Content-Encoding", "gzip")
req.Request = httpRequest req.Request = httpRequest
doCacheReadEntityBytes = false
doc := make(map[string]interface{}) doc := make(map[string]interface{})
req.ReadEntity(&doc) req.ReadEntity(&doc)
@ -117,7 +116,6 @@ func TestZlibDecompressRequestBody(t *testing.T) {
httpRequest.Header.Set("Content-Encoding", "deflate") httpRequest.Header.Set("Content-Encoding", "deflate")
req.Request = httpRequest req.Request = httpRequest
doCacheReadEntityBytes = false
doc := make(map[string]interface{}) doc := make(map[string]interface{})
req.ReadEntity(&doc) req.ReadEntity(&doc)

View file

@ -9,25 +9,26 @@ import (
"compress/zlib" "compress/zlib"
) )
// CompressorProvider describes a component that can provider compressors for the std methods.
type CompressorProvider interface { type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later. // Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset(). // Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer AcquireGzipWriter() *gzip.Writer
// Releases an aqcuired *gzip.Writer. // Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer) ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later. // Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader AcquireGzipReader() *gzip.Reader
// Releases an aqcuired *gzip.Reader. // Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader) ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later. // Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset(). // Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer AcquireZlibWriter() *zlib.Writer
// Releases an aqcuired *zlib.Writer. // Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer) ReleaseZlibWriter(w *zlib.Writer)
} }
@ -44,7 +45,7 @@ func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider return currentCompressorProvider
} }
// CompressorProvider sets the actual provider of compressors (zlib or gzip). // SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) { func SetCompressorProvider(p CompressorProvider) {
if p == nil { if p == nil {
panic("cannot set compressor provider to nil") panic("cannot set compressor provider to nil")

View file

@ -6,6 +6,7 @@ package restful
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"os" "os"
@ -24,24 +25,24 @@ type Container struct {
ServeMux *http.ServeMux ServeMux *http.ServeMux
isRegisteredOnRoot bool isRegisteredOnRoot bool
containerFilters []FilterFunction containerFilters []FilterFunction
doNotRecover bool // default is false doNotRecover bool // default is true
recoverHandleFunc RecoverHandleFunction recoverHandleFunc RecoverHandleFunction
serviceErrorHandleFunc ServiceErrorHandleFunction serviceErrorHandleFunc ServiceErrorHandleFunction
router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
contentEncodingEnabled bool // default is false contentEncodingEnabled bool // default is false
} }
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311) // NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container { func NewContainer() *Container {
return &Container{ return &Container{
webServices: []*WebService{}, webServices: []*WebService{},
ServeMux: http.NewServeMux(), ServeMux: http.NewServeMux(),
isRegisteredOnRoot: false, isRegisteredOnRoot: false,
containerFilters: []FilterFunction{}, containerFilters: []FilterFunction{},
doNotRecover: false, doNotRecover: true,
recoverHandleFunc: logStackOnRecover, recoverHandleFunc: logStackOnRecover,
serviceErrorHandleFunc: writeServiceError, serviceErrorHandleFunc: writeServiceError,
router: RouterJSR311{}, router: CurlyRouter{},
contentEncodingEnabled: false} contentEncodingEnabled: false}
} }
@ -68,12 +69,12 @@ func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
// DoNotRecover controls whether panics will be caught to return HTTP 500. // DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation. // If set to true, Route functions are responsible for handling any error situation.
// Default value is false = recover from panics. This has performance implications. // Default value is true.
func (c *Container) DoNotRecover(doNot bool) { func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot c.doNotRecover = doNot
} }
// Router changes the default Router (currently RouterJSR311) // Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) { func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter c.router = aRouter
} }
@ -83,18 +84,42 @@ func (c *Container) EnableContentEncoding(enabled bool) {
c.contentEncodingEnabled = enabled c.contentEncodingEnabled = enabled
} }
// Add a WebService to the Container. It will detect duplicate root paths and panic in that case. // Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
func (c *Container) Add(service *WebService) *Container { func (c *Container) Add(service *WebService) *Container {
c.webServicesLock.Lock() c.webServicesLock.Lock()
defer c.webServicesLock.Unlock() defer c.webServicesLock.Unlock()
// If registered on root then no additional specific mapping is needed
// if rootPath was not set then lazy initialize it
if len(service.rootPath) == 0 {
service.Path("/")
}
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
os.Exit(1)
}
}
// If not registered on root then add specific mapping
if !c.isRegisteredOnRoot { if !c.isRegisteredOnRoot {
pattern := c.fixedPrefixPath(service.RootPath()) c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
}
c.webServices = append(c.webServices, service)
return c
}
// addHandler may set a new HandleFunc for the serveMux
// this function must run inside the critical region protected by the webServicesLock.
// returns true if the function was registered on root ("/")
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
pattern := fixedPrefixPath(service.RootPath())
// check if root path registration is needed // check if root path registration is needed
if "/" == pattern || "" == pattern { if "/" == pattern || "" == pattern {
c.ServeMux.HandleFunc("/", c.dispatch) serveMux.HandleFunc("/", c.dispatch)
c.isRegisteredOnRoot = true return true
} else { }
// detect if registration already exists // detect if registration already exists
alreadyMapped := false alreadyMapped := false
for _, each := range c.webServices { for _, each := range c.webServices {
@ -104,38 +129,36 @@ func (c *Container) Add(service *WebService) *Container {
} }
} }
if !alreadyMapped { if !alreadyMapped {
c.ServeMux.HandleFunc(pattern, c.dispatch) serveMux.HandleFunc(pattern, c.dispatch)
if !strings.HasSuffix(pattern, "/") { if !strings.HasSuffix(pattern, "/") {
c.ServeMux.HandleFunc(pattern+"/", c.dispatch) serveMux.HandleFunc(pattern+"/", c.dispatch)
} }
} }
} return false
}
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
os.Exit(1)
}
}
// if rootPath was not set then lazy initialize it
if len(service.rootPath) == 0 {
service.Path("/")
}
c.webServices = append(c.webServices, service)
return c
} }
func (c *Container) Remove(ws *WebService) error { func (c *Container) Remove(ws *WebService) error {
if c.ServeMux == http.DefaultServeMux {
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
log.Print(errMsg)
return errors.New(errMsg)
}
c.webServicesLock.Lock() c.webServicesLock.Lock()
defer c.webServicesLock.Unlock() defer c.webServicesLock.Unlock()
// build a new ServeMux and re-register all WebServices
newServeMux := http.NewServeMux()
newServices := []*WebService{} newServices := []*WebService{}
for ix := range c.webServices { newIsRegisteredOnRoot := false
if c.webServices[ix].rootPath != ws.rootPath { for _, each := range c.webServices {
newServices = append(newServices, c.webServices[ix]) if each.rootPath != ws.rootPath {
// If not registered on root then add specific mapping
if !newIsRegisteredOnRoot {
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
}
newServices = append(newServices, each)
} }
} }
c.webServices = newServices c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
return nil return nil
} }
@ -165,6 +188,17 @@ func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message) resp.WriteErrorString(err.Code, err.Message)
} }
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
if httpWriter == nil {
panic("httpWriter cannot be nil")
}
if httpRequest == nil {
panic("httpRequest cannot be nil")
}
c.dispatch(httpWriter, httpRequest)
}
// Dispatch the incoming Http Request to a matching WebService. // Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter writer := httpWriter
@ -185,12 +219,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
} }
}() }()
} }
// Install closing the request body (if any)
defer func() {
if nil != httpRequest.Body {
httpRequest.Body.Close()
}
}()
// Detect if compression is needed // Detect if compression is needed
// assume without compression, test for override // assume without compression, test for override
@ -251,7 +279,7 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
} }
// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} // fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {}
func (c Container) fixedPrefixPath(pathspec string) string { func fixedPrefixPath(pathspec string) string {
varBegin := strings.Index(pathspec, "{") varBegin := strings.Index(pathspec, "{")
if -1 == varBegin { if -1 == varBegin {
return pathspec return pathspec
@ -260,12 +288,12 @@ func (c Container) fixedPrefixPath(pathspec string) string {
} }
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server // ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
c.ServeMux.ServeHTTP(httpwriter, httpRequest) c.ServeMux.ServeHTTP(httpwriter, httpRequest)
} }
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. // Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
func (c Container) Handle(pattern string, handler http.Handler) { func (c *Container) Handle(pattern string, handler http.Handler) {
c.ServeMux.Handle(pattern, handler) c.ServeMux.Handle(pattern, handler)
} }
@ -295,7 +323,7 @@ func (c *Container) Filter(filter FilterFunction) {
} }
// RegisteredWebServices returns the collections of added WebServices // RegisteredWebServices returns the collections of added WebServices
func (c Container) RegisteredWebServices() []*WebService { func (c *Container) RegisteredWebServices() []*WebService {
c.webServicesLock.RLock() c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock() defer c.webServicesLock.RUnlock()
result := make([]*WebService, len(c.webServices)) result := make([]*WebService, len(c.webServices))
@ -306,7 +334,7 @@ func (c Container) RegisteredWebServices() []*WebService {
} }
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request // computeAllowedMethods returns a list of HTTP methods that are valid for a Request
func (c Container) computeAllowedMethods(req *Request) []string { func (c *Container) computeAllowedMethods(req *Request) []string {
// Go through all RegisteredWebServices() and all its Routes to collect the options // Go through all RegisteredWebServices() and all its Routes to collect the options
methods := []string{} methods := []string{}
requestPath := req.Request.URL.Path requestPath := req.Request.URL.Path

View file

@ -59,3 +59,25 @@ func TestContainer_HandleWithFilter(t *testing.T) {
t.Errorf("handler added by calling HandleWithFilter wasn't called") t.Errorf("handler added by calling HandleWithFilter wasn't called")
} }
} }
func TestContainerAddAndRemove(t *testing.T) {
ws1 := new(WebService).Path("/")
ws2 := new(WebService).Path("/users")
wc := NewContainer()
wc.Add(ws1)
wc.Add(ws2)
wc.Remove(ws2)
if len(wc.webServices) != 1 {
t.Errorf("expected one webservices")
}
if !wc.isRegisteredOnRoot {
t.Errorf("expected on root registered")
}
wc.Remove(ws1)
if len(wc.webServices) > 0 {
t.Errorf("expected zero webservices")
}
if wc.isRegisteredOnRoot {
t.Errorf("expected not on root registered")
}
}

View file

@ -5,6 +5,7 @@ package restful
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
import ( import (
"regexp"
"strconv" "strconv"
"strings" "strings"
) )
@ -19,11 +20,13 @@ import (
type CrossOriginResourceSharing struct { type CrossOriginResourceSharing struct {
ExposeHeaders []string // list of Header names ExposeHeaders []string // list of Header names
AllowedHeaders []string // list of Header names AllowedHeaders []string // list of Header names
AllowedDomains []string // list of allowed values for Http Origin. If empty all are allowed. AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
AllowedMethods []string AllowedMethods []string
MaxAge int // number of seconds before requiring new Options request MaxAge int // number of seconds before requiring new Options request
CookiesAllowed bool CookiesAllowed bool
Container *Container Container *Container
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
} }
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html // Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
@ -37,22 +40,13 @@ func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *
chain.ProcessFilter(req, resp) chain.ProcessFilter(req, resp)
return return
} }
if len(c.AllowedDomains) > 0 { // if provided then origin must be included if !c.isOriginAllowed(origin) { // check whether this origin is allowed
included := false
for _, each := range c.AllowedDomains {
if each == origin {
included = true
break
}
}
if !included {
if trace { if trace {
traceLogger.Printf("HTTP Origin:%s is not part of %v", origin, c.AllowedDomains) traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
} }
chain.ProcessFilter(req, resp) chain.ProcessFilter(req, resp)
return return
} }
}
if req.Request.Method != "OPTIONS" { if req.Request.Method != "OPTIONS" {
c.doActualRequest(req, resp) c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp) chain.ProcessFilter(req, resp)
@ -74,8 +68,12 @@ func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
if len(c.AllowedMethods) == 0 { if len(c.AllowedMethods) == 0 {
if c.Container == nil {
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
} else {
c.AllowedMethods = c.Container.computeAllowedMethods(req) c.AllowedMethods = c.Container.computeAllowedMethods(req)
} }
}
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
@ -124,13 +122,32 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
if len(c.AllowedDomains) == 0 { if len(c.AllowedDomains) == 0 {
return true return true
} }
allowed := false allowed := false
for _, each := range c.AllowedDomains { for _, domain := range c.AllowedDomains {
if each == origin { if domain == origin {
allowed = true allowed = true
break break
} }
} }
if !allowed {
if len(c.allowedOriginPatterns) == 0 {
// compile allowed domains to allowed origin patterns
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
if err != nil {
return false
}
c.allowedOriginPatterns = allowedOriginRegexps
}
for _, pattern := range c.allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
}
return allowed return allowed
} }
@ -170,3 +187,16 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str
} }
return false return false
} }
// Take a list of strings and compile them into a list of regular expressions.
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
regexps := []*regexp.Regexp{}
for _, regexpStr := range regexpStrings {
r, err := regexp.Compile(regexpStr)
if err != nil {
return regexps, err
}
regexps = append(regexps, r)
}
return regexps, nil
}

View file

@ -29,7 +29,7 @@ func TestCORSFilter_Preflight(t *testing.T) {
httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header") httpRequest.Header.Set(HEADER_AccessControlRequestHeaders, "X-Custom-Header, X-Additional-Header")
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest) DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
if "http://api.bob.com" != actual { if "http://api.bob.com" != actual {
@ -78,7 +78,7 @@ func TestCORSFilter_Actual(t *testing.T) {
httpRequest.Header.Set("X-Custom-Header", "value") httpRequest.Header.Set("X-Custom-Header", "value")
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest) DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
if "http://api.bob.com" != actual { if "http://api.bob.com" != actual {
t.Fatal("expected: http://api.bob.com but got:" + actual) t.Fatal("expected: http://api.bob.com but got:" + actual)
@ -91,9 +91,13 @@ func TestCORSFilter_Actual(t *testing.T) {
var allowedDomainInput = []struct { var allowedDomainInput = []struct {
domains []string domains []string
origin string origin string
accepted bool allowed bool
}{ }{
{[]string{}, "http://anything.com", true}, {[]string{}, "http://anything.com", true},
{[]string{"example.com"}, "example.com", true},
{[]string{"example.com"}, "not-allowed", false},
{[]string{"not-matching.com", "example.com"}, "example.com", true},
{[]string{".*"}, "example.com", true},
} }
// go test -v -test.run TestCORSFilter_AllowedDomains ...restful // go test -v -test.run TestCORSFilter_AllowedDomains ...restful
@ -113,12 +117,12 @@ func TestCORSFilter_AllowedDomains(t *testing.T) {
httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil) httpRequest, _ := http.NewRequest("PUT", "http://api.his.com/cors", nil)
httpRequest.Header.Set(HEADER_Origin, each.origin) httpRequest.Header.Set(HEADER_Origin, each.origin)
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest) DefaultContainer.Dispatch(httpWriter, httpRequest)
actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin) actual := httpWriter.Header().Get(HEADER_AccessControlAllowOrigin)
if actual != each.origin && each.accepted { if actual != each.origin && each.allowed {
t.Fatal("expected to be accepted") t.Fatal("expected to be accepted")
} }
if actual == each.origin && !each.accepted { if actual == each.origin && !each.allowed {
t.Fatal("did not expect to be accepted") t.Fatal("did not expect to be accepted")
} }
} }

View file

@ -44,16 +44,16 @@ func (c CurlyRouter) SelectRoute(
} }
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) []Route { func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
candidates := &sortableCurlyRoutes{[]*curlyRoute{}} candidates := sortableCurlyRoutes{}
for _, each := range ws.routes { for _, each := range ws.routes {
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
if matches { if matches {
candidates.add(&curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
} }
} }
sort.Sort(sort.Reverse(candidates)) sort.Sort(sort.Reverse(candidates))
return candidates.routes() return candidates
} }
// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. // matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are.
@ -108,11 +108,13 @@ func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, reque
return (matched && err == nil), false return (matched && err == nil), false
} }
var jsr311Router = RouterJSR311{}
// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type // detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go // headers of the Request. See also RouterJSR311 in jsr311.go
func (c CurlyRouter) detectRoute(candidateRoutes []Route, httpRequest *http.Request) (*Route, error) { func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
// tracing is done inside detectRoute // tracing is done inside detectRoute
return RouterJSR311{}.detectRoute(candidateRoutes, httpRequest) return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
} }
// detectWebService returns the best matching webService given the list of path tokens. // detectWebService returns the best matching webService given the list of path tokens.

View file

@ -11,30 +11,28 @@ type curlyRoute struct {
staticCount int staticCount int
} }
type sortableCurlyRoutes struct { type sortableCurlyRoutes []curlyRoute
candidates []*curlyRoute
func (s *sortableCurlyRoutes) add(route curlyRoute) {
*s = append(*s, route)
} }
func (s *sortableCurlyRoutes) add(route *curlyRoute) { func (s sortableCurlyRoutes) routes() (routes []Route) {
s.candidates = append(s.candidates, route) for _, each := range s {
}
func (s *sortableCurlyRoutes) routes() (routes []Route) {
for _, each := range s.candidates {
routes = append(routes, each.route) // TODO change return type routes = append(routes, each.route) // TODO change return type
} }
return routes return routes
} }
func (s *sortableCurlyRoutes) Len() int { func (s sortableCurlyRoutes) Len() int {
return len(s.candidates) return len(s)
} }
func (s *sortableCurlyRoutes) Swap(i, j int) { func (s sortableCurlyRoutes) Swap(i, j int) {
s.candidates[i], s.candidates[j] = s.candidates[j], s.candidates[i] s[i], s[j] = s[j], s[i]
} }
func (s *sortableCurlyRoutes) Less(i, j int) bool { func (s sortableCurlyRoutes) Less(i, j int) bool {
ci := s.candidates[i] ci := s[i]
cj := s.candidates[j] cj := s[j]
// primary key // primary key
if ci.staticCount < cj.staticCount { if ci.staticCount < cj.staticCount {

View file

@ -163,12 +163,12 @@ func TestCurly_ISSUE_34(t *testing.T) {
ws1 := new(WebService).Path("/") ws1 := new(WebService).Path("/")
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy)) ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy)) ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12")) croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
if len(routes) != 2 { if len(croutes) != 2 {
t.Fatal("expected 2 routes") t.Fatal("expected 2 routes")
} }
if routes[0].Path != "/network/{id}" { if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
t.Error("first is", routes[0].Path) t.Errorf("got %v want %v", got, want)
} }
} }
@ -177,12 +177,12 @@ func TestCurly_ISSUE_34_2(t *testing.T) {
ws1 := new(WebService) ws1 := new(WebService)
ws1.Route(ws1.GET("/network/{id}").To(curlyDummy)) ws1.Route(ws1.GET("/network/{id}").To(curlyDummy))
ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy)) ws1.Route(ws1.GET("/{type}/{id}").To(curlyDummy))
routes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12")) croutes := CurlyRouter{}.selectRoutes(ws1, tokenizePath("/network/12"))
if len(routes) != 2 { if len(croutes) != 2 {
t.Fatal("expected 2 routes") t.Fatal("expected 2 routes")
} }
if routes[0].Path != "/network/{id}" { if got, want := croutes[0].route.Path, "/network/{id}"; got != want {
t.Error("first is", routes[0].Path) t.Errorf("got %v want %v", got, want)
} }
} }

View file

@ -1,5 +1,5 @@
/* /*
Package restful, a lean package for creating REST-style WebServices without magic. Package restful , a lean package for creating REST-style WebServices without magic.
WebServices and Routes WebServices and Routes
@ -145,22 +145,11 @@ Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it. This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
restful.DefaultContainer.Router(CurlyRouter{}) restful.DefaultContainer.DoNotRecover(false)
The default router is the RouterJSR311 which is an implementation of its spec (http://jsr311.java.net/nonav/releases/1.1/spec/spec.html).
However, it uses regular expressions for all its routes which, depending on your usecase, may consume a significant amount of time.
The CurlyRouter implementation is more lightweight that also allows you to use wildcards and expressions, but only if needed.
restful.DefaultContainer.DoNotRecover(true)
DoNotRecover controls whether panics will be caught to return HTTP 500. DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to true, Route functions are responsible for handling any error situation. If set to false, the container will recover from panics.
Default value is false; it will recover from panics. This has performance implications. Default value is true
restful.SetCacheReadEntity(false)
SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
If you expect to read large amounts of payload data, and you do not use this feature, you should set it to false.
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))

View file

@ -36,8 +36,8 @@ type entityReaderWriters struct {
} }
func init() { func init() {
RegisterEntityAccessor(MIME_JSON, entityJSONAccess{ContentType: MIME_JSON}) RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
RegisterEntityAccessor(MIME_XML, entityXMLAccess{ContentType: MIME_XML}) RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
} }
// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. // RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type.
@ -47,8 +47,20 @@ func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
entityAccessRegistry.accessors[mime] = erw entityAccessRegistry.accessors[mime] = erw
} }
// AccessorAt returns the registered ReaderWriter for this MIME type. // NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
func (r *entityReaderWriters) AccessorAt(mime string) (EntityReaderWriter, bool) { // This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
return entityJSONAccess{ContentType: contentType}
}
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
return entityXMLAccess{ContentType: contentType}
}
// accessorAt returns the registered ReaderWriter for this MIME type.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
r.protection.RLock() r.protection.RLock()
defer r.protection.RUnlock() defer r.protection.RUnlock()
er, ok := r.accessors[mime] er, ok := r.accessors[mime]

View file

@ -49,7 +49,7 @@ func TestKeyValueEncoding(t *testing.T) {
// Write // Write
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
// Accept Produces // Accept Produces
resp := Response{httpWriter, "application/kv,*/*;q=0.8", []string{"application/kv"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/kv,*/*;q=0.8", routeProduces: []string{"application/kv"}, prettyPrint: true}
resp.WriteEntity(b) resp.WriteEntity(b)
t.Log(string(httpWriter.Body.Bytes())) t.Log(string(httpWriter.Body.Bytes()))
if !kv.writeCalled { if !kv.writeCalled {

View file

@ -24,3 +24,12 @@ func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction // FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain) type FilterFunction func(*Request, *Response, *FilterChain)
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
// See examples/restful-no-cache-filter.go for usage
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
resp.Header().Set("Expires", "0") // Proxies.
chain.ProcessFilter(req, resp)
}

View file

@ -1,9 +0,0 @@
cd examples
ls *.go | xargs -I {} go build -o /tmp/ignore {}
cd ..
go fmt ...swagger && \
go test -test.v ...swagger && \
go install ...swagger && \
go fmt ...restful && \
go test -test.v ...restful && \
go install ...restful

View file

@ -41,9 +41,29 @@ func (r RouterJSR311) SelectRoute(
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
ifOk := []Route{}
for _, each := range routes {
ok := true
for _, fn := range each.If {
if !fn(httpRequest) {
ok = false
break
}
}
if ok {
ifOk = append(ifOk, each)
}
}
if len(ifOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
}
return nil, NewError(http.StatusNotFound, "404: Not Found")
}
// http method // http method
methodOk := []Route{} methodOk := []Route{}
for _, each := range routes { for _, each := range ifOk {
if httpRequest.Method == each.Method { if httpRequest.Method == each.Method {
methodOk = append(methodOk, each) methodOk = append(methodOk, each)
} }
@ -74,7 +94,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
// accept // accept
outputMediaOk := []Route{} outputMediaOk := []Route{}
accept := httpRequest.Header.Get(HEADER_Accept) accept := httpRequest.Header.Get(HEADER_Accept)
if accept == "" { if len(accept) == 0 {
accept = "*/*" accept = "*/*"
} }
for _, each := range inputMediaOk { for _, each := range inputMediaOk {
@ -88,7 +108,8 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
} }
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
} }
return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return &outputMediaOk[0], nil
} }
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 // http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2

View file

@ -2,6 +2,7 @@ package restful
import ( import (
"io" "io"
"net/http"
"sort" "sort"
"testing" "testing"
) )
@ -209,4 +210,42 @@ func TestSortableRouteCandidates(t *testing.T) {
} }
} }
func TestDetectRouteReturns404IfNoRoutePassesConditions(t *testing.T) {
called := false
shouldNotBeCalledButWas := false
routes := []Route{
new(RouteBuilder).To(dummy).
If(func(req *http.Request) bool { return false }).
Build(),
// check that condition functions are called in order
new(RouteBuilder).
To(dummy).
If(func(req *http.Request) bool { return true }).
If(func(req *http.Request) bool { called = true; return false }).
Build(),
// check that condition functions short circuit
new(RouteBuilder).
To(dummy).
If(func(req *http.Request) bool { return false }).
If(func(req *http.Request) bool { shouldNotBeCalledButWas = true; return false }).
Build(),
}
_, err := RouterJSR311{}.detectRoute(routes, (*http.Request)(nil))
if se := err.(ServiceError); se.Code != 404 {
t.Fatalf("expected 404, got %d", se.Code)
}
if !called {
t.Fatal("expected condition function to get called, but it wasn't")
}
if shouldNotBeCalledButWas {
t.Fatal("expected condition function to not be called, but it was")
}
}
func dummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "dummy") } func dummy(req *Request, resp *Response) { io.WriteString(resp.ResponseWriter, "dummy") }

View file

@ -5,7 +5,7 @@ import (
"os" "os"
) )
// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger // StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface { type StdLogger interface {
Print(v ...interface{}) Print(v ...interface{})
Printf(format string, v ...interface{}) Printf(format string, v ...interface{})
@ -18,14 +18,17 @@ func init() {
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
} }
// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) { func SetLogger(customLogger StdLogger) {
Logger = customLogger Logger = customLogger
} }
// Print delegates to the Logger
func Print(v ...interface{}) { func Print(v ...interface{}) {
Logger.Print(v...) Logger.Print(v...)
} }
// Printf delegates to the Logger
func Printf(format string, v ...interface{}) { func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...) Logger.Printf(format, v...)
} }

View file

@ -21,7 +21,7 @@ func TraceLogger(logger log.StdLogger) {
EnableTracing(logger != nil) EnableTracing(logger != nil)
} }
// expose the setter for the global logger on the top-level package // SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) { func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger) log.SetLogger(customLogger)
} }

45
vendor/github.com/emicklei/go-restful/mime.go generated vendored Normal file
View file

@ -0,0 +1,45 @@
package restful
import (
"strconv"
"strings"
)
type mime struct {
media string
quality float64
}
// insertMime adds a mime to a list and keeps it sorted by quality.
func insertMime(l []mime, e mime) []mime {
for i, each := range l {
// if current mime has lower quality then insert before
if e.quality > each.quality {
left := append([]mime{}, l[0:i]...)
return append(append(left, e), l[i:]...)
}
}
return append(l, e)
}
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
func sortedMimes(accept string) (sorted []mime) {
for _, each := range strings.Split(accept, ",") {
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
if len(typeAndQuality) == 1 {
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
} else {
// take factor
parts := strings.Split(typeAndQuality[1], "=")
if len(parts) == 2 {
f, err := strconv.ParseFloat(parts[1], 64)
if err != nil {
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
} else {
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
}
}
}
}
return
}

17
vendor/github.com/emicklei/go-restful/mime_test.go generated vendored Normal file
View file

@ -0,0 +1,17 @@
package restful
import (
"fmt"
"testing"
)
// go test -v -test.run TestSortMimes ...restful
func TestSortMimes(t *testing.T) {
accept := "text/html; q=0.8, text/plain, image/gif, */*; q=0.01, image/jpeg"
result := sortedMimes(accept)
got := fmt.Sprintf("%v", result)
want := "[{text/plain 1} {image/gif 1} {image/jpeg 1} {text/html 0.8} {*/* 0.01}]"
if got != want {
t.Errorf("bad sort order of mime types:%s", got)
}
}

View file

@ -15,7 +15,15 @@ func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterCha
chain.ProcessFilter(req, resp) chain.ProcessFilter(req, resp)
return return
} }
resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
methods := strings.Join(c.computeAllowedMethods(req), ",")
origin := req.Request.Header.Get(HEADER_Origin)
resp.AddHeader(HEADER_Allow, methods)
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
} }
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method // OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method

View file

@ -5,20 +5,15 @@ package restful
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
import ( import (
"bytes"
"compress/zlib" "compress/zlib"
"io/ioutil"
"net/http" "net/http"
) )
var defaultRequestContentType string var defaultRequestContentType string
var doCacheReadEntityBytes = true
// Request is a wrapper for a http Request that provides convenience methods // Request is a wrapper for a http Request that provides convenience methods
type Request struct { type Request struct {
Request *http.Request Request *http.Request
bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
pathParameters map[string]string pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
@ -41,12 +36,6 @@ func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime defaultRequestContentType = mime
} }
// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it.
func SetCacheReadEntity(doCache bool) {
doCacheReadEntityBytes = doCache
}
// PathParameter accesses the Path parameter value by its name // PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string { func (r *Request) PathParameter(name string) string {
return r.pathParameters[name] return r.pathParameters[name]
@ -81,18 +70,6 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType) contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
// OLD feature, cache the body for reads
if doCacheReadEntityBytes {
if r.bodyContent == nil {
data, err := ioutil.ReadAll(r.Request.Body)
if err != nil {
return err
}
r.bodyContent = &data
}
r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
}
// check if the request body needs decompression // check if the request body needs decompression
if ENCODING_GZIP == contentEncoding { if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader() gzipReader := currentCompressorProvider.AcquireGzipReader()
@ -107,11 +84,16 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
r.Request.Body = zlibReader r.Request.Body = zlibReader
} }
// lookup the EntityReader // lookup the EntityReader, use defaultRequestContentType if needed and provided
entityReader, ok := entityAccessRegistry.AccessorAt(contentType) entityReader, ok := entityAccessRegistry.accessorAt(contentType)
if !ok {
if len(defaultRequestContentType) != 0 {
entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
}
if !ok { if !ok {
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
} }
}
return entityReader.Read(r, entityPointer) return entityReader.Read(r, entityPointer)
} }

View file

@ -29,38 +29,6 @@ type Sample struct {
Value string Value string
} }
func TestReadEntityXmlCached(t *testing.T) {
SetCacheReadEntity(true)
bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
httpRequest.Header.Set("Content-Type", "application/xml")
request := &Request{Request: httpRequest}
sam := new(Sample)
request.ReadEntity(sam)
if sam.Value != "42" {
t.Fatal("read failed")
}
if request.bodyContent == nil {
t.Fatal("no expected cached bytes found")
}
}
func TestReadEntityXmlNonCached(t *testing.T) {
SetCacheReadEntity(false)
bodyReader := strings.NewReader("<Sample><Value>42</Value></Sample>")
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
httpRequest.Header.Set("Content-Type", "application/xml")
request := &Request{Request: httpRequest}
sam := new(Sample)
request.ReadEntity(sam)
if sam.Value != "42" {
t.Fatal("read failed")
}
if request.bodyContent != nil {
t.Fatal("unexpected cached bytes found")
}
}
func TestReadEntityJson(t *testing.T) { func TestReadEntityJson(t *testing.T) {
bodyReader := strings.NewReader(`{"Value" : "42"}`) bodyReader := strings.NewReader(`{"Value" : "42"}`)
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
@ -86,37 +54,6 @@ func TestReadEntityJsonCharset(t *testing.T) {
} }
func TestReadEntityJsonNumber(t *testing.T) { func TestReadEntityJsonNumber(t *testing.T) {
SetCacheReadEntity(true)
bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
httpRequest.Header.Set("Content-Type", "application/json")
request := &Request{Request: httpRequest}
any := make(Anything)
request.ReadEntity(&any)
number, ok := any["Value"].(json.Number)
if !ok {
t.Fatal("read failed")
}
vint, err := number.Int64()
if err != nil {
t.Fatal("convert failed")
}
if vint != 4899710515899924123 {
t.Fatal("read failed")
}
vfloat, err := number.Float64()
if err != nil {
t.Fatal("convert failed")
}
// match the default behaviour
vstring := strconv.FormatFloat(vfloat, 'e', 15, 64)
if vstring != "4.899710515899924e+18" {
t.Fatal("convert float64 failed")
}
}
func TestReadEntityJsonNumberNonCached(t *testing.T) {
SetCacheReadEntity(false)
bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`) bodyReader := strings.NewReader(`{"Value" : 4899710515899924123}`)
httpRequest, _ := http.NewRequest("GET", "/test", bodyReader) httpRequest, _ := http.NewRequest("GET", "/test", bodyReader)
httpRequest.Header.Set("Content-Type", "application/json") httpRequest.Header.Set("Content-Type", "application/json")

View file

@ -5,12 +5,13 @@ package restful
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
import ( import (
"bufio"
"errors" "errors"
"net"
"net/http" "net/http"
"strings"
) )
// DEPRECATED, use DefaultResponseContentType(mime) // DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string var DefaultResponseMimeType string
//PrettyPrintResponses controls the indentation feature of XML and JSON serialization //PrettyPrintResponses controls the indentation feature of XML and JSON serialization
@ -22,17 +23,20 @@ type Response struct {
http.ResponseWriter http.ResponseWriter
requestAccept string // mime-type what the Http Request says it wants to receive requestAccept string // mime-type what the Http Request says it wants to receive
routeProduces []string // mime-types what the Route says it can produce routeProduces []string // mime-types what the Route says it can produce
statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
contentLength int // number of bytes written for the response body contentLength int // number of bytes written for the response body
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
err error // err property is kept when WriteError is called err error // err property is kept when WriteError is called
hijacker http.Hijacker // if underlying ResponseWriter supports it
} }
// Creates a new response based on a http ResponseWriter. // NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response { func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types hijacker, _ := httpWriter.(http.Hijacker)
return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
} }
// DefaultResponseContentType set a default.
// If Accept header matching fails, fall back to this type. // If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML // Valid values are restful.MIME_JSON and restful.MIME_XML
// Example: // Example:
@ -48,6 +52,16 @@ func (r Response) InternalServerError() Response {
return r return r
} }
// Hijack implements the http.Hijacker interface. This expands
// the Response to fulfill http.Hijacker if the underlying
// http.ResponseWriter supports it.
func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if r.hijacker == nil {
return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
}
return r.hijacker.Hijack()
}
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. // PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) { func (r *Response) PrettyPrint(bePretty bool) {
r.prettyPrint = bePretty r.prettyPrint = bePretty
@ -68,38 +82,39 @@ func (r *Response) SetRequestAccepts(mime string) {
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. // can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. // If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) { func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
for _, qualifiedMime := range strings.Split(r.requestAccept, ",") { sorted := sortedMimes(r.requestAccept)
mime := strings.Trim(strings.Split(qualifiedMime, ";")[0], " ") for _, eachAccept := range sorted {
if 0 == len(mime) || mime == "*/*" { for _, eachProduce := range r.routeProduces {
if eachProduce == eachAccept.media {
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
return w, true
}
}
}
if eachAccept.media == "*/*" {
for _, each := range r.routeProduces { for _, each := range r.routeProduces {
if MIME_JSON == each { if w, ok := entityAccessRegistry.accessorAt(each); ok {
return entityAccessRegistry.AccessorAt(MIME_JSON) return w, true
}
if MIME_XML == each {
return entityAccessRegistry.AccessorAt(MIME_XML)
}
}
} else { // mime is not blank; see if we have a match in Produces
for _, each := range r.routeProduces {
if mime == each {
if MIME_JSON == each {
return entityAccessRegistry.AccessorAt(MIME_JSON)
}
if MIME_XML == each {
return entityAccessRegistry.AccessorAt(MIME_XML)
} }
} }
} }
} }
} // if requestAccept is empty
writer, ok := entityAccessRegistry.AccessorAt(r.requestAccept) writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
if !ok { if !ok {
// if not registered then fallback to the defaults (if set) // if not registered then fallback to the defaults (if set)
if DefaultResponseMimeType == MIME_JSON { if DefaultResponseMimeType == MIME_JSON {
return entityAccessRegistry.AccessorAt(MIME_JSON) return entityAccessRegistry.accessorAt(MIME_JSON)
} }
if DefaultResponseMimeType == MIME_XML { if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.AccessorAt(MIME_XML) return entityAccessRegistry.accessorAt(MIME_XML)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
} }
if trace { if trace {
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
@ -130,25 +145,25 @@ func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
} }
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) // WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter. // It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error { func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value) return writeXML(r, http.StatusOK, MIME_XML, value)
} }
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) // WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the valuel ; not using a registered EntityReaderWriter. // It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value) return writeXML(r, status, MIME_XML, value)
} }
// WriteAsJson is a convenience method for writing a value in json. // WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter. // It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error { func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value) return writeJSON(r, http.StatusOK, MIME_JSON, value)
} }
// WriteJson is a convenience method for writing a value in Json with a given Content-Type. // WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the valuel ; not using a registered EntityReaderWriter. // It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error { func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value) return writeJSON(r, http.StatusOK, contentType, value)
} }
@ -184,6 +199,15 @@ func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
return nil return nil
} }
// Flush implements http.Flusher interface, which sends any buffered data to the client.
func (r *Response) Flush() {
if f, ok := r.ResponseWriter.(http.Flusher); ok {
f.Flush()
} else if trace {
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
}
}
// WriteHeader is overridden to remember the Status Code that has been written. // WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this. // Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) { func (r *Response) WriteHeader(httpStatus int) {

View file

@ -10,7 +10,7 @@ import (
func TestWriteHeader(t *testing.T) { func TestWriteHeader(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteHeader(123) resp.WriteHeader(123)
if resp.StatusCode() != 123 { if resp.StatusCode() != 123 {
t.Errorf("Unexpected status code:%d", resp.StatusCode()) t.Errorf("Unexpected status code:%d", resp.StatusCode())
@ -19,7 +19,7 @@ func TestWriteHeader(t *testing.T) {
func TestNoWriteHeader(t *testing.T) { func TestNoWriteHeader(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
if resp.StatusCode() != http.StatusOK { if resp.StatusCode() != http.StatusOK {
t.Errorf("Unexpected status code:%d", resp.StatusCode()) t.Errorf("Unexpected status code:%d", resp.StatusCode())
} }
@ -32,7 +32,7 @@ type food struct {
// go test -v -test.run TestMeasureContentLengthXml ...restful // go test -v -test.run TestMeasureContentLengthXml ...restful
func TestMeasureContentLengthXml(t *testing.T) { func TestMeasureContentLengthXml(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteAsXml(food{"apple"}) resp.WriteAsXml(food{"apple"})
if resp.ContentLength() != 76 { if resp.ContentLength() != 76 {
t.Errorf("Incorrect measured length:%d", resp.ContentLength()) t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@ -42,7 +42,7 @@ func TestMeasureContentLengthXml(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthJson ...restful // go test -v -test.run TestMeasureContentLengthJson ...restful
func TestMeasureContentLengthJson(t *testing.T) { func TestMeasureContentLengthJson(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteAsJson(food{"apple"}) resp.WriteAsJson(food{"apple"})
if resp.ContentLength() != 22 { if resp.ContentLength() != 22 {
t.Errorf("Incorrect measured length:%d", resp.ContentLength()) t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@ -52,7 +52,7 @@ func TestMeasureContentLengthJson(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful // go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful
func TestMeasureContentLengthJsonNotPretty(t *testing.T) { func TestMeasureContentLengthJsonNotPretty(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, false, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}}
resp.WriteAsJson(food{"apple"}) resp.WriteAsJson(food{"apple"})
if resp.ContentLength() != 17 { // 16+1 using the Encoder directly yields another /n if resp.ContentLength() != 17 { // 16+1 using the Encoder directly yields another /n
t.Errorf("Incorrect measured length:%d", resp.ContentLength()) t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@ -62,7 +62,7 @@ func TestMeasureContentLengthJsonNotPretty(t *testing.T) {
// go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful // go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful
func TestMeasureContentLengthWriteErrorString(t *testing.T) { func TestMeasureContentLengthWriteErrorString(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteErrorString(404, "Invalid") resp.WriteErrorString(404, "Invalid")
if resp.ContentLength() != len("Invalid") { if resp.ContentLength() != len("Invalid") {
t.Errorf("Incorrect measured length:%d", resp.ContentLength()) t.Errorf("Incorrect measured length:%d", resp.ContentLength())
@ -80,7 +80,7 @@ func TestStatusIsPassedToResponse(t *testing.T) {
{write: 400, read: 400}, {write: 400, read: 400},
} { } {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "*/*", routeProduces: []string{"*/*"}, prettyPrint: true}
resp.WriteHeader(each.write) resp.WriteHeader(each.write)
if got, want := httpWriter.Code, each.read; got != want { if got, want := httpWriter.Code, each.read; got != want {
t.Errorf("got %v want %v", got, want) t.Errorf("got %v want %v", got, want)
@ -91,11 +91,11 @@ func TestStatusIsPassedToResponse(t *testing.T) {
// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful // go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful
func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) { func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteHeader(201) resp.WriteHeader(201)
resp.WriteAsJson(food{"Juicy"}) resp.WriteAsJson(food{"Juicy"})
if httpWriter.HeaderMap.Get("Content-Type") != "application/json" { if httpWriter.HeaderMap.Get("Content-Type") != "application/json" {
t.Errorf("Expected content type json but got:%d", httpWriter.HeaderMap.Get("Content-Type")) t.Errorf("Expected content type json but got:%s", httpWriter.HeaderMap.Get("Content-Type"))
} }
if httpWriter.Code != 201 { if httpWriter.Code != 201 {
t.Errorf("Expected status 201 but got:%d", httpWriter.Code) t.Errorf("Expected status 201 but got:%d", httpWriter.Code)
@ -113,7 +113,7 @@ func (e errorOnWriteRecorder) Write(bytes []byte) (int, error) {
// go test -v -test.run TestLastWriteErrorCaught ...restful // go test -v -test.run TestLastWriteErrorCaught ...restful
func TestLastWriteErrorCaught(t *testing.T) { func TestLastWriteErrorCaught(t *testing.T) {
httpWriter := errorOnWriteRecorder{httptest.NewRecorder()} httpWriter := errorOnWriteRecorder{httptest.NewRecorder()}
resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
err := resp.WriteAsJson(food{"Juicy"}) err := resp.WriteAsJson(food{"Juicy"})
if err.Error() != "fail" { if err.Error() != "fail" {
t.Errorf("Unexpected error message:%v", err) t.Errorf("Unexpected error message:%v", err)
@ -124,7 +124,7 @@ func TestLastWriteErrorCaught(t *testing.T) {
func TestAcceptStarStar_Issue83(t *testing.T) { func TestAcceptStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
// Accept Produces // Accept Produces
resp := Response{httpWriter, "application/bogus,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"}) resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type") ct := httpWriter.Header().Get("Content-Type")
if "application/json" != ct { if "application/json" != ct {
@ -136,7 +136,7 @@ func TestAcceptStarStar_Issue83(t *testing.T) {
func TestAcceptSkipStarStar_Issue83(t *testing.T) { func TestAcceptSkipStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
// Accept Produces // Accept Produces
resp := Response{httpWriter, " application/xml ,*/* ; q=0.8", []string{"application/json", "application/xml"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: " application/xml ,*/* ; q=0.8", routeProduces: []string{"application/json", "application/xml"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"}) resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type") ct := httpWriter.Header().Get("Content-Type")
if "application/xml" != ct { if "application/xml" != ct {
@ -148,7 +148,7 @@ func TestAcceptSkipStarStar_Issue83(t *testing.T) {
func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) { func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
// Accept Produces // Accept Produces
resp := Response{httpWriter, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteEntity(food{"Juicy"}) resp.WriteEntity(food{"Juicy"})
ct := httpWriter.Header().Get("Content-Type") ct := httpWriter.Header().Get("Content-Type")
if "application/json" != ct { if "application/json" != ct {
@ -159,7 +159,7 @@ func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) {
// go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful // go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful
func TestWriteHeaderNoContent_Issue124(t *testing.T) { func TestWriteHeaderNoContent_Issue124(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "text/plain", []string{"text/plain"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "text/plain", routeProduces: []string{"text/plain"}, prettyPrint: true}
resp.WriteHeader(http.StatusNoContent) resp.WriteHeader(http.StatusNoContent)
if httpWriter.Code != http.StatusNoContent { if httpWriter.Code != http.StatusNoContent {
t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent) t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent)
@ -169,7 +169,7 @@ func TestWriteHeaderNoContent_Issue124(t *testing.T) {
// go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful // go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful
func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) { func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteHeader(http.StatusNotModified) resp.WriteHeader(http.StatusNotModified)
if httpWriter.Code != http.StatusNotModified { if httpWriter.Code != http.StatusNotModified {
t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified) t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified)
@ -178,7 +178,7 @@ func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) {
func TestWriteHeaderAndEntity_Issue235(t *testing.T) { func TestWriteHeaderAndEntity_Issue235(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/json", routeProduces: []string{"application/json"}, prettyPrint: true}
var pong = struct { var pong = struct {
Foo string `json:"foo"` Foo string `json:"foo"`
}{Foo: "123"} }{Foo: "123"}
@ -194,9 +194,18 @@ func TestWriteHeaderAndEntity_Issue235(t *testing.T) {
} }
} }
func TestWriteEntityNotAcceptable(t *testing.T) { func TestWriteEntityNoAcceptMatchWithProduces(t *testing.T) {
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
resp := Response{httpWriter, "application/bogus", []string{"application/json"}, 0, 0, true, nil} resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{"application/json"}, prettyPrint: true}
resp.WriteEntity("done")
if httpWriter.Code != http.StatusOK {
t.Errorf("got %d want %d", httpWriter.Code, http.StatusOK)
}
}
func TestWriteEntityNoAcceptMatchNoProduces(t *testing.T) {
httpWriter := httptest.NewRecorder()
resp := Response{ResponseWriter: httpWriter, requestAccept: "application/bogus", routeProduces: []string{}, prettyPrint: true}
resp.WriteEntity("done") resp.WriteEntity("done")
if httpWriter.Code != http.StatusNotAcceptable { if httpWriter.Code != http.StatusNotAcceptable {
t.Errorf("got %d want %d", httpWriter.Code, http.StatusNotAcceptable) t.Errorf("got %d want %d", httpWriter.Code, http.StatusNotAcceptable)

View file

@ -13,6 +13,11 @@ import (
// RouteFunction declares the signature of a function that can be bound to a Route. // RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response) type RouteFunction func(*Request, *Response)
// RouteSelectionConditionFunction declares the signature of a function that
// can be used to add extra conditional logic when selecting whether the route
// matches the HTTP request.
type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct { type Route struct {
Method string Method string
@ -21,6 +26,7 @@ type Route struct {
Path string // webservice root path + described path Path string // webservice root path + described path
Function RouteFunction Function RouteFunction
Filters []FilterFunction Filters []FilterFunction
If []RouteSelectionConditionFunction
// cached values for dispatching // cached values for dispatching
relativePath string relativePath string
@ -34,6 +40,9 @@ type Route struct {
ParameterDocs []*Parameter ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload ReadSample, WriteSample interface{} // structs that model an example request or response payload
// Extra information used to store custom information about the route.
Metadata map[string]interface{}
} }
// Initialize for Route // Initialize for Route
@ -97,7 +106,7 @@ func (r Route) matchesContentType(mimeTypes string) bool {
} }
if len(mimeTypes) == 0 { if len(mimeTypes) == 0 {
// idempotent methods with (most-likely or garanteed) empty content match missing Content-Type // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
m := r.Method m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true return true

View file

@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file. // that can be found in the LICENSE file.
import ( import (
"fmt"
"os" "os"
"reflect" "reflect"
"runtime" "runtime"
"strings" "strings"
"sync/atomic"
"github.com/emicklei/go-restful/log" "github.com/emicklei/go-restful/log"
) )
@ -22,6 +24,10 @@ type RouteBuilder struct {
httpMethod string // required httpMethod string // required
function RouteFunction // required function RouteFunction // required
filters []FilterFunction filters []FilterFunction
conditions []RouteSelectionConditionFunction
typeNameHandleFunc TypeNameHandleFunction // required
// documentation // documentation
doc string doc string
notes string notes string
@ -29,6 +35,7 @@ type RouteBuilder struct {
readSample, writeSample interface{} readSample, writeSample interface{}
parameters []*Parameter parameters []*Parameter
errorMap map[int]ResponseError errorMap map[int]ResponseError
metadata map[string]interface{}
} }
// Do evaluates each argument with the RouteBuilder itself. // Do evaluates each argument with the RouteBuilder itself.
@ -83,7 +90,7 @@ func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
return b return b
} }
// A verbose explanation of the operation behavior. Optional. // Notes is a verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder { func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
b.notes = notes b.notes = notes
return b return b
@ -92,8 +99,13 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
// Reads tells what resource type will be read from the request payload. Optional. // Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. // A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder { func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
fn := b.typeNameHandleFunc
if fn == nil {
fn = reflectTypeName
}
typeAsName := fn(sample)
b.readSample = sample b.readSample = sample
typeAsName := reflect.TypeOf(sample).String()
bodyParameter := &Parameter{&ParameterData{Name: "body"}} bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody() bodyParameter.beBody()
bodyParameter.Required(true) bodyParameter.Required(true)
@ -128,7 +140,7 @@ func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
return b return b
} }
// Operation allows you to document what the acutal method/function call is of the Route. // Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..). // Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder { func (b *RouteBuilder) Operation(name string) *RouteBuilder {
b.operation = name b.operation = name
@ -148,6 +160,7 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
Code: code, Code: code,
Message: message, Message: message,
Model: model, Model: model,
IsDefault: false,
} }
// lazy init because there is no NewRouteBuilder (yet) // lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil { if b.errorMap == nil {
@ -157,10 +170,36 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
return b return b
} }
// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
b.Returns(0, message, model)
// Modify the ResponseError just added/updated
re := b.errorMap[0]
// errorMap is initialized
b.errorMap[0] = ResponseError{
Code: re.Code,
Message: re.Message,
Model: re.Model,
IsDefault: true,
}
return b
}
// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
if b.metadata == nil {
b.metadata = map[string]interface{}{}
}
b.metadata[key] = value
return b
}
// ResponseError represents a response; not necessarily an error.
type ResponseError struct { type ResponseError struct {
Code int Code int
Message string Message string
Model interface{} Model interface{}
IsDefault bool
} }
func (b *RouteBuilder) servicePath(path string) *RouteBuilder { func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
@ -174,6 +213,21 @@ func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
return b return b
} }
// If sets a condition function that controls matching the Route based on custom logic.
// The condition function is provided the HTTP request and should return true if the route
// should be considered.
//
// Efficiency note: the condition function is called before checking the method, produces, and
// consumes criteria, so that the correct HTTP status code can be returned.
//
// Lifecycle note: no filter functions have been called prior to calling the condition function,
// so the condition function should not depend on any context that might be set up by container
// or route filters.
func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
b.conditions = append(b.conditions, condition)
return b
}
// If no specific Route path then set to rootPath // If no specific Route path then set to rootPath
// If no specific Produces then set to rootProduces // If no specific Produces then set to rootProduces
// If no specific Consumes then set to rootConsumes // If no specific Consumes then set to rootConsumes
@ -186,6 +240,13 @@ func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
} }
} }
// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
b.typeNameHandleFunc = handler
return b
}
// Build creates a new Route using the specification details collected by the RouteBuilder // Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route { func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath) pathExpr, err := newPathExpression(b.currentPath)
@ -209,6 +270,7 @@ func (b *RouteBuilder) Build() Route {
Consumes: b.consumes, Consumes: b.consumes,
Function: b.function, Function: b.function,
Filters: b.filters, Filters: b.filters,
If: b.conditions,
relativePath: b.currentPath, relativePath: b.currentPath,
pathExpr: pathExpr, pathExpr: pathExpr,
Doc: b.doc, Doc: b.doc,
@ -217,7 +279,8 @@ func (b *RouteBuilder) Build() Route {
ParameterDocs: b.parameters, ParameterDocs: b.parameters,
ResponseErrors: b.errorMap, ResponseErrors: b.errorMap,
ReadSample: b.readSample, ReadSample: b.readSample,
WriteSample: b.writeSample} WriteSample: b.writeSample,
Metadata: b.metadata}
route.postBuild() route.postBuild()
return route return route
} }
@ -226,6 +289,8 @@ func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
} }
var anonymousFuncCount int32
// nameOfFunction returns the short name of the function f for documentation. // nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions. // It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string { func nameOfFunction(f interface{}) string {
@ -236,5 +301,10 @@ func nameOfFunction(f interface{}) string {
last = strings.TrimSuffix(last, ")-fm") // Go 1.5 last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5 last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5 last = strings.TrimSuffix(last, "-fm") // Go 1.5
if last == "func1" { // this could mean conflicts in API docs
val := atomic.AddInt32(&anonymousFuncCount, 1)
last = "func" + fmt.Sprintf("%d", val)
atomic.StoreInt32(&anonymousFuncCount, val)
}
return last return last
} }

View file

@ -2,6 +2,7 @@ package restful
import ( import (
"testing" "testing"
"time"
) )
func TestRouteBuilder_PathParameter(t *testing.T) { func TestRouteBuilder_PathParameter(t *testing.T) {
@ -41,7 +42,7 @@ func TestRouteBuilder(t *testing.T) {
json := "application/json" json := "application/json"
b := new(RouteBuilder) b := new(RouteBuilder)
b.To(dummy) b.To(dummy)
b.Path("/routes").Method("HEAD").Consumes(json).Produces(json) b.Path("/routes").Method("HEAD").Consumes(json).Produces(json).Metadata("test", "test-value").DefaultReturns("default", time.Now())
r := b.Build() r := b.Build()
if r.Path != "/routes" { if r.Path != "/routes" {
t.Error("path invalid") t.Error("path invalid")
@ -55,4 +56,21 @@ func TestRouteBuilder(t *testing.T) {
if r.Operation != "dummy" { if r.Operation != "dummy" {
t.Error("Operation not set") t.Error("Operation not set")
} }
if r.Metadata["test"] != "test-value" {
t.Errorf("Metadata not set")
}
if _, ok := r.ResponseErrors[0]; !ok {
t.Fatal("expected default response")
}
}
func TestAnonymousFuncNaming(t *testing.T) {
f1 := func() {}
f2 := func() {}
if got, want := nameOfFunction(f1), "func1"; got != want {
t.Errorf("got %v want %v", got, want)
}
if got, want := nameOfFunction(f2), "func2"; got != want {
t.Errorf("got %v want %v", got, want)
}
} }

View file

@ -1,8 +1,9 @@
package restful package restful
import ( import (
"fmt" "errors"
"os" "os"
"reflect"
"sync" "sync"
"github.com/emicklei/go-restful/log" "github.com/emicklei/go-restful/log"
@ -24,6 +25,8 @@ type WebService struct {
documentation string documentation string
apiVersion string apiVersion string
typeNameHandleFunc TypeNameHandleFunction
dynamicRoutes bool dynamicRoutes bool
// protects 'routes' if dynamic routes are enabled // protects 'routes' if dynamic routes are enabled
@ -34,11 +37,27 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
w.dynamicRoutes = enable w.dynamicRoutes = enable
} }
// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
// into the restful documentation for the service.
type TypeNameHandleFunction func(sample interface{}) string
// TypeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions. If not set, the web service will invoke
// reflect.TypeOf(object).String().
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
w.typeNameHandleFunc = handler
return w
}
// reflectTypeName is the default TypeNameHandleFunction and for a given object
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
// the reflection API.
func reflectTypeName(sample interface{}) string {
return reflect.TypeOf(sample).String()
}
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. // compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() { func (w *WebService) compilePathExpression() {
if len(w.rootPath) == 0 {
w.Path("/") // lazy initialize path
}
compiled, err := newPathExpression(w.rootPath) compiled, err := newPathExpression(w.rootPath)
if err != nil { if err != nil {
log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err) log.Printf("[restful] invalid path:%s because:%v", w.rootPath, err)
@ -54,12 +73,15 @@ func (w *WebService) ApiVersion(apiVersion string) *WebService {
} }
// Version returns the API version for documentation purposes. // Version returns the API version for documentation purposes.
func (w WebService) Version() string { return w.apiVersion } func (w *WebService) Version() string { return w.apiVersion }
// Path specifies the root URL template path of the WebService. // Path specifies the root URL template path of the WebService.
// All Routes will be relative to this path. // All Routes will be relative to this path.
func (w *WebService) Path(root string) *WebService { func (w *WebService) Path(root string) *WebService {
w.rootPath = root w.rootPath = root
if len(w.rootPath) == 0 {
w.rootPath = "/"
}
w.compilePathExpression() w.compilePathExpression()
return w return w
} }
@ -155,21 +177,26 @@ func (w *WebService) Route(builder *RouteBuilder) *WebService {
// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' // RemoveRoute removes the specified route, looks for something that matches 'path' and 'method'
func (w *WebService) RemoveRoute(path, method string) error { func (w *WebService) RemoveRoute(path, method string) error {
if !w.dynamicRoutes { if !w.dynamicRoutes {
return fmt.Errorf("dynamic routes are not enabled.") return errors.New("dynamic routes are not enabled.")
} }
w.routesLock.Lock() w.routesLock.Lock()
defer w.routesLock.Unlock() defer w.routesLock.Unlock()
newRoutes := make([]Route, (len(w.routes) - 1))
current := 0
for ix := range w.routes { for ix := range w.routes {
if w.routes[ix].Method == method && w.routes[ix].Path == path { if w.routes[ix].Method == method && w.routes[ix].Path == path {
w.routes = append(w.routes[:ix], w.routes[ix+1:]...) continue
} }
newRoutes[current] = w.routes[ix]
current = current + 1
} }
w.routes = newRoutes
return nil return nil
} }
// Method creates a new RouteBuilder and initialize its http method // Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder { func (w *WebService) Method(httpMethod string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
} }
// Produces specifies that this WebService can produce one or more MIME types. // Produces specifies that this WebService can produce one or more MIME types.
@ -187,7 +214,7 @@ func (w *WebService) Consumes(accepts ...string) *WebService {
} }
// Routes returns the Routes associated with this WebService // Routes returns the Routes associated with this WebService
func (w WebService) Routes() []Route { func (w *WebService) Routes() []Route {
if !w.dynamicRoutes { if !w.dynamicRoutes {
return w.routes return w.routes
} }
@ -202,12 +229,12 @@ func (w WebService) Routes() []Route {
} }
// RootPath returns the RootPath associated with this WebService. Default "/" // RootPath returns the RootPath associated with this WebService. Default "/"
func (w WebService) RootPath() string { func (w *WebService) RootPath() string {
return w.rootPath return w.rootPath
} }
// PathParameters return the path parameter names for (shared amoung its Routes) // PathParameters return the path parameter names for (shared among its Routes)
func (w WebService) PathParameters() []*Parameter { func (w *WebService) PathParameters() []*Parameter {
return w.pathParameters return w.pathParameters
} }
@ -224,7 +251,7 @@ func (w *WebService) Doc(plainText string) *WebService {
} }
// Documentation returns it. // Documentation returns it.
func (w WebService) Documentation() string { func (w *WebService) Documentation() string {
return w.documentation return w.documentation
} }
@ -234,30 +261,30 @@ func (w WebService) Documentation() string {
// HEAD is a shortcut for .Method("HEAD").Path(subPath) // HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder { func (w *WebService) HEAD(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
} }
// GET is a shortcut for .Method("GET").Path(subPath) // GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder { func (w *WebService) GET(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
} }
// POST is a shortcut for .Method("POST").Path(subPath) // POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder { func (w *WebService) POST(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
} }
// PUT is a shortcut for .Method("PUT").Path(subPath) // PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder { func (w *WebService) PUT(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
} }
// PATCH is a shortcut for .Method("PATCH").Path(subPath) // PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder { func (w *WebService) PATCH(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
} }
// DELETE is a shortcut for .Method("DELETE").Path(subPath) // DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder { func (w *WebService) DELETE(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath) return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
} }

View file

@ -44,6 +44,8 @@ func TestCapturePanic(t *testing.T) {
httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil) httpRequest, _ := http.NewRequest("GET", "http://here.com/fire", nil)
httpRequest.Header.Set("Accept", "*/*") httpRequest.Header.Set("Accept", "*/*")
httpWriter := httptest.NewRecorder() httpWriter := httptest.NewRecorder()
// override the default here
DefaultContainer.DoNotRecover(false)
DefaultContainer.dispatch(httpWriter, httpRequest) DefaultContainer.dispatch(httpWriter, httpRequest)
if 500 != httpWriter.Code { if 500 != httpWriter.Code {
t.Error("500 expected on fire") t.Error("500 expected on fire")
@ -110,6 +112,17 @@ func TestContentType415_Issue170(t *testing.T) {
} }
} }
func TestNoContentTypePOST(t *testing.T) {
tearDown()
Add(newPostNoConsumesService())
httpRequest, _ := http.NewRequest("POST", "http://here.com/post", nil)
httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest)
if 204 != httpWriter.Code {
t.Errorf("Expected 204, got %d", httpWriter.Code)
}
}
func TestContentType415_POST_Issue170(t *testing.T) { func TestContentType415_POST_Issue170(t *testing.T) {
tearDown() tearDown()
Add(newPostOnlyJsonOnlyService()) Add(newPostOnlyJsonOnlyService())
@ -171,6 +184,41 @@ func TestRemoveRoute(t *testing.T) {
t.Errorf("got %v, want %v", got, want) t.Errorf("got %v, want %v", got, want)
} }
} }
func TestRemoveLastRoute(t *testing.T) {
tearDown()
TraceLogger(testLogger{t})
ws := newGetPlainTextOrJsonServiceMultiRoute()
Add(ws)
httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil)
httpRequest.Header.Set("Accept", "text/plain")
httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest)
if got, want := httpWriter.Code, 200; got != want {
t.Errorf("got %v, want %v", got, want)
}
// dynamic apis are disabled, should error and do nothing
if err := ws.RemoveRoute("/get", "GET"); err == nil {
t.Error("unexpected non-error")
}
httpWriter = httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest)
if got, want := httpWriter.Code, 200; got != want {
t.Errorf("got %v, want %v", got, want)
}
ws.SetDynamicRoutes(true)
if err := ws.RemoveRoute("/get", "GET"); err != nil {
t.Errorf("unexpected error %v", err)
}
httpWriter = httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest)
if got, want := httpWriter.Code, 404; got != want {
t.Errorf("got %v, want %v", got, want)
}
}
// go test -v -test.run TestContentTypeOctet_Issue170 ...restful // go test -v -test.run TestContentTypeOctet_Issue170 ...restful
func TestContentTypeOctet_Issue170(t *testing.T) { func TestContentTypeOctet_Issue170(t *testing.T) {
@ -193,6 +241,29 @@ func TestContentTypeOctet_Issue170(t *testing.T) {
} }
} }
type exampleBody struct{}
func TestParameterDataTypeDefaults(t *testing.T) {
tearDown()
ws := new(WebService)
route := ws.POST("/post").Reads(&exampleBody{})
if route.parameters[0].data.DataType != "*restful.exampleBody" {
t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data)
}
}
func TestParameterDataTypeCustomization(t *testing.T) {
tearDown()
ws := new(WebService)
ws.TypeNameHandler(func(sample interface{}) string {
return "my.custom.type.name"
})
route := ws.POST("/post").Reads(&exampleBody{})
if route.parameters[0].data.DataType != "my.custom.type.name" {
t.Errorf("body parameter incorrect name: %#v", route.parameters[0].data)
}
}
func newPanicingService() *WebService { func newPanicingService() *WebService {
ws := new(WebService).Path("") ws := new(WebService).Path("")
ws.Route(ws.GET("/fire").To(doPanic)) ws.Route(ws.GET("/fire").To(doPanic))
@ -226,6 +297,14 @@ func newGetPlainTextOrJsonService() *WebService {
return ws return ws
} }
func newGetPlainTextOrJsonServiceMultiRoute() *WebService {
ws := new(WebService).Path("")
ws.Produces("text/plain", "application/json")
ws.Route(ws.GET("/get").To(doNothing))
ws.Route(ws.GET("/status").To(doNothing))
return ws
}
func newGetConsumingOctetStreamService() *WebService { func newGetConsumingOctetStreamService() *WebService {
ws := new(WebService).Path("") ws := new(WebService).Path("")
ws.Consumes("application/octet-stream") ws.Consumes("application/octet-stream")
@ -233,6 +312,12 @@ func newGetConsumingOctetStreamService() *WebService {
return ws return ws
} }
func newPostNoConsumesService() *WebService {
ws := new(WebService).Path("")
ws.Route(ws.POST("/post").To(return204))
return ws
}
func newSelectedRouteTestingService() *WebService { func newSelectedRouteTestingService() *WebService {
ws := new(WebService).Path("") ws := new(WebService).Path("")
ws.Route(ws.GET(pathGetFriends).To(selectedRouteChecker)) ws.Route(ws.GET(pathGetFriends).To(selectedRouteChecker))
@ -252,3 +337,7 @@ func doPanic(req *Request, resp *Response) {
func doNothing(req *Request, resp *Response) { func doNothing(req *Request, resp *Response) {
} }
func return204(req *Request, resp *Response) {
resp.WriteHeader(204)
}

View file

@ -186,7 +186,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) {
var expandOptions *spec.ExpandOptions var expandOptions *spec.ExpandOptions
if len(options) > 0 { if len(options) > 0 {
expandOptions = options[1] expandOptions = options[0]
} else { } else {
expandOptions = &spec.ExpandOptions{ expandOptions = &spec.ExpandOptions{
RelativeBase: filepath.Dir(d.specFilePath), RelativeBase: filepath.Dir(d.specFilePath),

View file

@ -32,6 +32,19 @@ func TestLoadsYAMLContent(t *testing.T) {
} }
} }
// for issue 11
func TestRegressionExpand(t *testing.T) {
swaggerFile := "fixtures/yaml/swagger/1/2/3/4/swagger.yaml"
document, err := Spec(swaggerFile)
assert.NoError(t, err)
assert.NotNil(t, document)
d, err := document.Expanded()
assert.NoError(t, err)
assert.NotNil(t, d)
b, _ := d.Spec().MarshalJSON()
assert.JSONEq(t, expectedExpanded, string(b))
}
func TestFailsInvalidJSON(t *testing.T) { func TestFailsInvalidJSON(t *testing.T) {
_, err := Analyzed(json.RawMessage([]byte("{]")), "") _, err := Analyzed(json.RawMessage([]byte("{]")), "")
@ -499,3 +512,132 @@ const PetStore20 = `{
} }
} }
` `
const expectedExpanded = `
{
"produces":[
"application/json",
"plain/text"
],
"schemes":[
"https",
"http"
],
"swagger":"2.0",
"info":{
"description":"Something",
"title":"Something",
"contact":{
"name":"Somebody",
"url":"https://url.com",
"email":"email@url.com"
},
"version":"v1"
},
"host":"security.sonusnet.com",
"basePath":"/api",
"paths":{
"/whatnot":{
"get":{
"description":"Get something",
"responses":{
"200":{
"description":"The something",
"schema":{
"description":"A collection of service events",
"type":"object",
"properties":{
"page":{
"description":"A description of a paged result",
"type":"object",
"properties":{
"page":{
"description":"the page that was requested",
"type":"integer"
},
"page_items":{
"description":"the number of items per page requested",
"type":"integer"
},
"pages":{
"description":"the total number of pages available",
"type":"integer"
},
"total_items":{
"description":"the total number of items available",
"type":"integer",
"format":"int64"
}
}
},
"something":{
"description":"Something",
"type":"object",
"properties":{
"p1":{
"description":"A string",
"type":"string"
},
"p2":{
"description":"An integer",
"type":"integer"
}
}
}
}
}
},
"500":{
"description":"Oops"
}
}
}
}
},
"definitions":{
"Something":{
"description":"A collection of service events",
"type":"object",
"properties":{
"page":{
"description":"A description of a paged result",
"type":"object",
"properties":{
"page":{
"description":"the page that was requested",
"type":"integer"
},
"page_items":{
"description":"the number of items per page requested",
"type":"integer"
},
"pages":{
"description":"the total number of pages available",
"type":"integer"
},
"total_items":{
"description":"the total number of items available",
"type":"integer",
"format":"int64"
}
}
},
"something":{
"description":"Something",
"type":"object",
"properties":{
"p1":{
"description":"A string",
"type":"string"
},
"p2":{
"description":"An integer",
"type":"integer"
}
}
}
}
}
}
}
`

View file

@ -352,14 +352,12 @@ func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
} }
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
tgt := reflect.ValueOf(target) tgt := reflect.ValueOf(target)
if tgt.Kind() != reflect.Ptr { if tgt.Kind() != reflect.Ptr {
return fmt.Errorf("resolve ref: target needs to be a pointer") return fmt.Errorf("resolve ref: target needs to be a pointer")
} }
oldRef := currentRef oldRef := currentRef
if currentRef != nil { if currentRef != nil {
debugLog("resolve ref current %s new %s", currentRef.String(), ref.String()) debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
nextRef := nextRef(node, ref, currentRef.GetPointer()) nextRef := nextRef(node, ref, currentRef.GetPointer())
@ -467,8 +465,6 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
return err return err
} }
r.currentRef = currentRef
return nil return nil
} }
@ -645,14 +641,18 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
return resolver.root.(*Schema), nil return resolver.root.(*Schema), nil
} }
// t is the new expanded schema
var t *Schema var t *Schema
var basePath string
b, _ := json.Marshal(target)
debugLog("Target is: %s", string(b))
for target.Ref.String() != "" { for target.Ref.String() != "" {
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) { if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
return &target, nil return &target, nil
} }
basePath = target.Ref.RemoteURI()
debugLog("\n\n\n\n\nbasePath: %s", basePath)
b, _ := json.Marshal(target)
debugLog("calling Resolve with target: %s", string(b))
if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) { if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) {
return &target, err return &target, err
} }
@ -666,7 +666,13 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
target = *t target = *t
} }
} }
if target.Ref.String() == "" {
b, _ := json.Marshal(target)
debugLog("before: %s", string(b))
modifyRefs(&target, basePath)
b, _ = json.Marshal(target)
debugLog("after: %s", string(b))
}
t, err := expandItems(target, parentRefs, resolver) t, err := expandItems(target, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) { if shouldStopOnError(err, resolver.options) {
return &target, err return &target, err
@ -675,6 +681,8 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*
target = *t target = *t
} }
resolver.reset()
for i := range target.AllOf { for i := range target.AllOf {
t, err := expandSchema(target.AllOf[i], parentRefs, resolver) t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) { if shouldStopOnError(err, resolver.options) {

View file

@ -178,9 +178,14 @@ func (i *Items) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &simpleSchema); err != nil { if err := json.Unmarshal(data, &simpleSchema); err != nil {
return err return err
} }
var vendorExtensible VendorExtensible
if err := json.Unmarshal(data, &vendorExtensible); err != nil {
return err
}
i.Refable = ref i.Refable = ref
i.CommonValidations = validations i.CommonValidations = validations
i.SimpleSchema = simpleSchema i.SimpleSchema = simpleSchema
i.VendorExtensible = vendorExtensible
return nil return nil
} }
@ -198,7 +203,11 @@ func (i Items) MarshalJSON() ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return swag.ConcatJSON(b3, b1, b2), nil b4, err := json.Marshal(i.VendorExtensible)
if err != nil {
return nil, err
}
return swag.ConcatJSON(b4, b3, b1, b2), nil
} }
// JSONLookup look up a value by the json property name // JSONLookup look up a value by the json property name

82
vendor/github.com/go-openapi/spec/refmodifier.go generated vendored Normal file
View file

@ -0,0 +1,82 @@
// Copyright 2017 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"fmt"
)
func modifyItemsRefs(target *Schema, basePath string) {
if target.Items != nil {
if target.Items.Schema != nil {
modifyRefs(target.Items.Schema, basePath)
}
for i := range target.Items.Schemas {
s := target.Items.Schemas[i]
modifyRefs(&s, basePath)
target.Items.Schemas[i] = s
}
}
}
func modifyRefs(target *Schema, basePath string) {
if target.Ref.String() != "" {
if target.Ref.RemoteURI() == basePath {
return
}
newURL := fmt.Sprintf("%s%s", basePath, target.Ref.String())
target.Ref, _ = NewRef(newURL)
}
modifyItemsRefs(target, basePath)
for i := range target.AllOf {
modifyRefs(&target.AllOf[i], basePath)
}
for i := range target.AnyOf {
modifyRefs(&target.AnyOf[i], basePath)
}
for i := range target.OneOf {
modifyRefs(&target.OneOf[i], basePath)
}
if target.Not != nil {
modifyRefs(target.Not, basePath)
}
for k := range target.Properties {
s := target.Properties[k]
modifyRefs(&s, basePath)
target.Properties[k] = s
}
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
modifyRefs(target.AdditionalProperties.Schema, basePath)
}
for k := range target.PatternProperties {
s := target.PatternProperties[k]
modifyRefs(&s, basePath)
target.PatternProperties[k] = s
}
for k := range target.Dependencies {
if target.Dependencies[k].Schema != nil {
modifyRefs(target.Dependencies[k].Schema, basePath)
}
}
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
modifyRefs(target.AdditionalItems.Schema, basePath)
}
for k := range target.Definitions {
s := target.Definitions[k]
modifyRefs(&s, basePath)
target.Definitions[k] = s
}
}

335
vendor/github.com/go-openapi/spec/refmodifier_test.go generated vendored Normal file
View file

@ -0,0 +1,335 @@
// Copyright 2017 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
var testJsonSchema = `{
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "#/definitions/positiveInteger" },
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/positiveInteger" },
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}
`
var modifiedTestJsonSchema = `{
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
"minLength": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "" },
{ "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
"minItems": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" },
"minProperties": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "" },
{ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "http://json-schema.org/draft-04/schema#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
"anyOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
"oneOf": { "$ref": "http://json-schema.org/draft-04/schema#/definitions/schemaArray" },
"not": { "$ref": "" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}
`
func TestRefModifier(t *testing.T) {
sch := Schema{}
assert.NoError(t, json.Unmarshal([]byte(testJsonSchema), &sch))
modifyRefs(&sch, "http://json-schema.org/draft-04/schema")
b, err := sch.MarshalJSON()
assert.NoError(t, err)
assert.JSONEq(t, modifiedTestJsonSchema, string(b))
}

View file

@ -156,7 +156,7 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
if s.Schema != nil { if s.Schema != nil {
return json.Marshal(s.Schema) return json.Marshal(s.Schema)
} }
return nil, nil return []byte("null"), nil
} }
// UnmarshalJSON converts this schema object or array from a JSON structure // UnmarshalJSON converts this schema object or array from a JSON structure

View file

@ -1,7 +1,7 @@
env: env:
- PROTOBUF_VERSION=2.6.1 - PROTOBUF_VERSION=2.6.1
- PROTOBUF_VERSION=3.0.2 - PROTOBUF_VERSION=3.0.2
- PROTOBUF_VERSION=3.1.0 - PROTOBUF_VERSION=3.4.0
before_install: before_install:
- ./install-protobuf.sh - ./install-protobuf.sh
@ -10,15 +10,11 @@ before_install:
script: script:
- PATH=/home/travis/bin:$PATH make buildserverall - PATH=/home/travis/bin:$PATH make buildserverall
- echo $TRAVIS_GO_VERSION - echo $TRAVIS_GO_VERSION
- if [ "$TRAVIS_GO_VERSION" == 1.8 ] && [[ "$PROTOBUF_VERSION" == 3.1.0 ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi - if [ "$TRAVIS_GO_VERSION" == 1.9 ] && [[ "$PROTOBUF_VERSION" == 3.4.0 ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi
language: go language: go
go: go:
- 1.6.3 - 1.8.3
- 1.7.1 - 1.9
- 1.8
matrix:
allow_failures:
- go: 1.6.3

View file

@ -10,5 +10,6 @@
# Please keep the list sorted. # Please keep the list sorted.
Sendgrid, Inc
Vastech SA (PTY) LTD Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com> Walter Schulze <awalterschulze@gmail.com>

View file

@ -1,4 +1,5 @@
Anton Povarov <anton.povarov@gmail.com> Anton Povarov <anton.povarov@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com> Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com> Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com> DongYun Kang <ceram1000@gmail.com>
@ -10,9 +11,12 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org> John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com> Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com> Patrick Lee <patrick@dropbox.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com> Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com> Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com> Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com> Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com> Tormod Erevik Lea <tormodlea@gmail.com>
Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com> Walter Schulze <awalterschulze@gmail.com>

View file

@ -83,6 +83,7 @@ regenerate:
make -C test/oneof regenerate make -C test/oneof regenerate
make -C test/oneof3 regenerate make -C test/oneof3 regenerate
make -C test/theproto3 regenerate make -C test/theproto3 regenerate
make -C test/mapdefaults regenerate
make -C test/mapsproto2 regenerate make -C test/mapsproto2 regenerate
make -C test/issue42order regenerate make -C test/issue42order regenerate
make -C proto generate-test-pbs make -C proto generate-test-pbs
@ -109,14 +110,19 @@ regenerate:
make -C test/issue260 regenerate make -C test/issue260 regenerate
make -C test/issue261 regenerate make -C test/issue261 regenerate
make -C test/issue262 regenerate make -C test/issue262 regenerate
make -C test/issue312 regenerate
make -C test/enumdecl regenerate make -C test/enumdecl regenerate
make -C test/typedecl_all regenerate make -C test/typedecl_all regenerate
make -C test/enumdecl_all regenerate make -C test/enumdecl_all regenerate
make -C test/int64support regenerate
make -C test/issue322 regenerate
make -C test/issue330 regenerate
make gofmt make gofmt
tests: tests:
go build ./test/enumprefix go build ./test/enumprefix
go test ./... go test ./...
(cd test/stdtypes && make test)
vet: vet:
go vet ./... go vet ./...
@ -138,15 +144,16 @@ testall:
make tests make tests
bench: bench:
go get golang.org/x/tools/cmd/benchcmp
(cd test/mixbench && go build .) (cd test/mixbench && go build .)
(cd test/mixbench && ./mixbench) ./test/mixbench/mixbench
contributors: contributors:
git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS
js: js:
ifeq (go1.8, $(findstring go1.8, $(GO_VERSION))) ifeq (go1.9, $(findstring go1.9, $(GO_VERSION)))
go get github.com/gopherjs/gopherjs go get -u github.com/gopherjs/gopherjs
gopherjs build github.com/gogo/protobuf/protoc-gen-gogo gopherjs build github.com/gogo/protobuf/protoc-gen-gogo
endif endif

View file

@ -25,7 +25,7 @@ To use this software, you must:
for details or, if you are using gccgo, follow the instructions at for details or, if you are using gccgo, follow the instructions at
https://golang.org/doc/install/gccgo https://golang.org/doc/install/gccgo
- Grab the code from the repository and install the proto package. - Grab the code from the repository and install the proto package.
The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.
The compiler plugin, protoc-gen-go, will be installed in $GOBIN, The compiler plugin, protoc-gen-go, will be installed in $GOBIN,
defaulting to $GOPATH/bin. It must be in your $PATH for the protocol defaulting to $GOPATH/bin. It must be in your $PATH for the protocol
compiler, protoc, to find it. compiler, protoc, to find it.
@ -118,7 +118,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences: When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers. - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method. - Enum types do not get an Enum method.
Consider file test.proto, containing Consider file test.proto, containing

View file

@ -38,14 +38,19 @@ These projects use gogoprotobuf:
- <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a> - <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a>
- <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a> - <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a>
- <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a> - <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a>
- <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/dev/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions - <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/master/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions
- <a href="https://containerd.io/">containerd</a> - <a href="https://github.com/containerd/containerd/tree/master/cmd/protoc-gen-gogoctrd">vanity command with custom field names</a> that conforms to the golang convention.
- <a href="https://github.com/heroiclabs/nakama">nakama</a>
- <a href="https://github.com/src-d/proteus">proteus</a>
- <a href="https://github.com/go-graphite">carbonzipper stack</a>
- <a href="https://sendgrid.com/">SendGrid</a>
Please lets us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>. Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>.
### Mentioned ### Mentioned
- <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a> - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a>
- <a href="http://gophercon.sourcegraph.com/post/83747547505/writing-a-high-performance-database-in-go">gophercon</a> - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a>
- <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a> - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a>
## Getting Started ## Getting Started
@ -59,10 +64,10 @@ After that you can choose:
### Installation ### Installation
To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 is continuously tested. To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.8.3 and 1.9 are continuously tested.
Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf).
Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.1.0 are continuously tested. Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.4.0 are continuously tested.
### Speed ### Speed

View file

@ -20,8 +20,8 @@ See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md) for
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">sizer</a></td><td>Message</td><td>bool</td><td>if true, a Size method is generated for the specific message</td><td>false</td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">sizer</a></td><td>Message</td><td>bool</td><td>if true, a Size method is generated for the specific message</td><td>false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unmarshaler</a></td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated for the specific message </td><td> false</td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unmarshaler</a></td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated for the specific message </td><td> false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">protosizer</a></td><td>Message</td><td>bool</td><td>if true, a ProtoSize method is generated for the specific message</td><td>false</td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/size">protosizer</a></td><td>Message</td><td>bool</td><td>if true, a ProtoSize method is generated for the specific message</td><td>false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto"> unsafe_marshaler</a> </td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. </td><td> false</td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto"> unsafe_marshaler</a> (deprecated) </td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated. </td><td> false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unsafe_unmarshaler</a></td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated for the specific message. The generated code uses the unsafe package and is not compatible with big endian CPUs. </td><td> false</td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/unmarshal">unsafe_unmarshaler</a> (deprecated) </td><td> Message </td><td> bool </td><td> if true, an Unmarshal method is generated. </td><td> false</td></tr>
<tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto">stable_marshaler</a></td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed</td><td> false </td></tr> <tr><td><a href="http://godoc.org/github.com/gogo/protobuf/plugin/marshalto">stable_marshaler</a></td><td> Message </td><td> bool </td><td> if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed</td><td> false </td></tr>
<tr><td>typedecl (beta)</td><td> Message </td><td> bool </td><td> if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated.</td><td> true </td></tr> <tr><td>typedecl (beta)</td><td> Message </td><td> bool </td><td> if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated.</td><td> true </td></tr>
</table> </table>
@ -69,6 +69,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th
<tr><td> goproto_enum_stringer (experimental) </td><td> Enum </td><td> bool </td><td> if false, the enum is generated without the default string method, this is useful for rather using enum_stringer </td><td> true </td></tr> <tr><td> goproto_enum_stringer (experimental) </td><td> Enum </td><td> bool </td><td> if false, the enum is generated without the default string method, this is useful for rather using enum_stringer </td><td> true </td></tr>
<tr><td> goproto_extensions_map (beta) </td><td> Message </td><td> bool </td><td> if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension </td><td> true </td></tr> <tr><td> goproto_extensions_map (beta) </td><td> Message </td><td> bool </td><td> if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension </td><td> true </td></tr>
<tr><td> goproto_unrecognized (beta) </td><td> Message </td><td> bool </td><td>if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. </td><td> true </td></tr> <tr><td> goproto_unrecognized (beta) </td><td> Message </td><td> bool </td><td>if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. </td><td> true </td></tr>
<tr><td> goproto_registration (beta) </td><td> File </td><td> bool </td><td>if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). </td><td> false </td></tr>
</table> </table>
# Less Typing # Less Typing

View file

@ -7,23 +7,26 @@ die() {
exit 1 exit 1
} }
cd /home/travis
case "$PROTOBUF_VERSION" in case "$PROTOBUF_VERSION" in
2*) 2*)
basename=protobuf-$PROTOBUF_VERSION basename=protobuf-$PROTOBUF_VERSION
wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz
tar xzf $basename.tar.gz
cd protobuf-$PROTOBUF_VERSION
./configure --prefix=/home/travis && make -j2 && make install
;; ;;
3*) 3*)
basename=protobuf-cpp-$PROTOBUF_VERSION basename=protoc-$PROTOBUF_VERSION-linux-x86_64
wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.zip
unzip $basename.zip
;; ;;
*) *)
die "unknown protobuf version: $PROTOBUF_VERSION" die "unknown protobuf version: $PROTOBUF_VERSION"
;; ;;
esac esac
cd /home/travis
wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz
tar xzf $basename.tar.gz
cd protobuf-$PROTOBUF_VERSION
./configure --prefix=/home/travis && make -j2 && make install

View file

@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build go1.7
package proto_test package proto_test
import ( import (

View file

@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type. // This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error { func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift. // use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
} }
func sizeZigzag64(x uint64) int { func sizeZigzag64(x uint64) int {
return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
} }
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer // EncodeZigzag32 writes a zigzag-encoded 32-bit integer

View file

@ -29,6 +29,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build go1.7
package proto_test package proto_test
import ( import (

View file

@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences: When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers. - Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method. - Enum types do not get an Enum method.
The simplest way to describe this is to see an example. The simplest way to describe this is to see an example.

View file

@ -193,6 +193,7 @@ type Properties struct {
Default string // default value Default string // default value
HasDefault bool // whether an explicit default was provided HasDefault bool // whether an explicit default was provided
CustomType string CustomType string
CastType string
StdTime bool StdTime bool
StdDuration bool StdDuration bool
@ -341,6 +342,8 @@ func (p *Properties) Parse(s string) {
p.OrigName = strings.Split(f, "=")[1] p.OrigName = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "customtype="): case strings.HasPrefix(f, "customtype="):
p.CustomType = strings.Split(f, "=")[1] p.CustomType = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "casttype="):
p.CastType = strings.Split(f, "=")[1]
case f == "stdtime": case f == "stdtime":
p.StdTime = true p.StdTime = true
case f == "stdduration": case f == "stdduration":

View file

@ -522,6 +522,17 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
} }
return nil return nil
} }
} else if len(props.CastType) > 0 {
if _, ok := v.Interface().(interface {
String() string
}); ok {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
_, err := fmt.Fprintf(w, "%d", v.Interface())
return err
}
}
} else if props.StdTime { } else if props.StdTime {
t, ok := v.Interface().(time.Time) t, ok := v.Interface().(time.Time)
if !ok { if !ok {
@ -531,9 +542,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if err != nil { if err != nil {
return err return err
} }
props.StdTime = false propsCopy := *props // Make a copy so that this is goroutine-safe
err = tm.writeAny(w, reflect.ValueOf(tproto), props) propsCopy.StdTime = false
props.StdTime = true err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
return err return err
} else if props.StdDuration { } else if props.StdDuration {
d, ok := v.Interface().(time.Duration) d, ok := v.Interface().(time.Duration)
@ -541,9 +552,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
} }
dproto := durationProto(d) dproto := durationProto(d)
props.StdDuration = false propsCopy := *props // Make a copy so that this is goroutine-safe
err := tm.writeAny(w, reflect.ValueOf(dproto), props) propsCopy.StdDuration = false
props.StdDuration = true err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
return err return err
} }
} }

View file

@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator) return p.readStruct(fv, terminator)
case reflect.Uint32: case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x)) fv.SetUint(x)
return nil return nil
} }
case reflect.Uint64: case reflect.Uint64:

View file

@ -339,13 +339,13 @@ func TestStringEscaping(t *testing.T) {
} }
// Check round-trip. // Check round-trip.
pb := new(pb.Strings) pbStrings := new(pb.Strings)
if err := proto.UnmarshalText(s, pb); err != nil { if err := proto.UnmarshalText(s, pbStrings); err != nil {
t.Errorf("#%d: UnmarshalText: %v", i, err) t.Errorf("#%d: UnmarshalText: %v", i, err)
continue continue
} }
if !proto.Equal(pb, tc.in) { if !proto.Equal(pbStrings, tc.in) {
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pbStrings)
} }
} }
} }

View file

@ -4,6 +4,7 @@ go:
- 1.6.x - 1.6.x
- 1.7.x - 1.7.x
- 1.8.x - 1.8.x
- 1.9.x
install: install:
- go get -v -d -t github.com/golang/protobuf/... - go get -v -d -t github.com/golang/protobuf/...

View file

@ -1,6 +1,7 @@
# Go support for Protocol Buffers # Go support for Protocol Buffers
[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) [![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)
[![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf)
Google's data interchange format. Google's data interchange format.
Copyright 2010 The Go Authors. Copyright 2010 The Go Authors.
@ -111,6 +112,7 @@ When the .proto file specifies `syntax="proto3"`, there are some differences:
Consider file test.proto, containing Consider file test.proto, containing
```proto ```proto
syntax = "proto2";
package example; package example;
enum FOO { X = 17; }; enum FOO { X = 17; };

View file

@ -62,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo) // any.Unpack(foo)
// ... // ...
// //
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use // The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack // 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/' // methods only use the fully qualified type name after the last '/'

View file

@ -74,6 +74,16 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo) // any.Unpack(foo)
// ... // ...
// //
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use // The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack // 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/' // methods only use the fully qualified type name after the last '/'

View file

@ -0,0 +1,144 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
google/protobuf/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (durations.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
}

View file

@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (durations.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}

View file

@ -0,0 +1,160 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
google/protobuf/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
}

View file

@ -0,0 +1,133 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
  // Wire layout is fixed by the well-known type; field numbers must not change.

  // Represents seconds of UTC time since Unix epoch
  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
  // 9999-12-31T23:59:59Z inclusive.
  int64 seconds = 1;

  // Non-negative fractions of a second at nanosecond resolution. Negative
  // second values with fractions must still have non-negative nanos values
  // that count forward in time. Must be from 0 to 999,999,999
  // inclusive.
  int32 nanos = 2;
}

46
vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md generated vendored Normal file
View file

@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

View file

@ -30,7 +30,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
### Mergo in the wild ### Mergo in the wild
- [docker/docker](https://github.com/docker/docker/) - [docker/docker](https://github.com/docker/docker/)
- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [imdario/zas](https://github.com/imdario/zas) - [imdario/zas](https://github.com/imdario/zas)
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) - [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
- [EagerIO/Stout](https://github.com/EagerIO/Stout) - [EagerIO/Stout](https://github.com/EagerIO/Stout)
@ -50,6 +50,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
- [thoas/picfit](https://github.com/thoas/picfit) - [thoas/picfit](https://github.com/thoas/picfit)
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search) - [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [Iris Web Framework](https://github.com/kataras/iris)
## Installation ## Installation
@ -64,15 +65,27 @@ If you were using Mergo **before** April 6th 2015, please check your project wor
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
if err := mergo.Merge(&dst, src); err != nil { ```go
if err := mergo.Merge(&dst, src); err != nil {
// ... // ...
} }
```
Also, you can merge overwriting values using MergeWithOverwrite.
```go
if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
// ...
}
```
Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
if err := mergo.Map(&dst, srcMap); err != nil { ```go
if err := mergo.Map(&dst, srcMap); err != nil {
// ... // ...
} }
```
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
@ -96,11 +109,11 @@ type Foo struct {
func main() { func main() {
src := Foo{ src := Foo{
A: "one", A: "one",
B: 2,
} }
dest := Foo{ dest := Foo{
A: "two", A: "two",
B: 2,
} }
mergo.Merge(&dest, src) mergo.Merge(&dest, src)

27
vendor/github.com/imdario/mergo/issue23_test.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
package mergo
import (
"testing"
"time"
)
// document is a fixture with a single pointer field, used to reproduce
// mergo issue #23 (overwriting through a *time.Time).
type document struct {
	Created *time.Time
}
// TestIssue23MergeWithOverwrite checks that MergeWithOverwrite replaces a
// non-nil pointer field in dst with the pointer taken from src.
func TestIssue23MergeWithOverwrite(t *testing.T) {
	current := time.Now()
	dst := document{Created: &current}
	want := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	src := document{Created: &want}
	if err := MergeWithOverwrite(&dst, src); err != nil {
		t.Errorf("Error while merging %s", err)
	}
	if dst.Created != src.Created {
		t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created)
	}
}

59
vendor/github.com/imdario/mergo/issue38_test.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
package mergo
import (
"testing"
"time"
)
// structWithoutTimePointer embeds a time.Time by value (no pointer), the
// shape reported in mergo issue #38.
type structWithoutTimePointer struct {
	Created time.Time
}
// TestIssue38Merge checks that a plain Merge leaves an already-populated
// time.Time value in dst untouched.
func TestIssue38Merge(t *testing.T) {
	dst := structWithoutTimePointer{Created: time.Now()}
	fixed := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	src := structWithoutTimePointer{Created: fixed}
	if err := Merge(&dst, src); err != nil {
		t.Errorf("Error while merging %s", err)
	}
	if dst.Created == src.Created {
		t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created)
	}
}
// TestIssue38MergeEmptyStruct asserts that even with a zero-valued dst,
// Merge does not copy src's time.Time into it (the assertion expects the
// values to remain different).
func TestIssue38MergeEmptyStruct(t *testing.T) {
	var dst structWithoutTimePointer
	fixed := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	src := structWithoutTimePointer{Created: fixed}
	if err := Merge(&dst, src); err != nil {
		t.Errorf("Error while merging %s", err)
	}
	if dst.Created == src.Created {
		t.Fatalf("Created merged unexpectedly: dst.Created(%v) == src.Created(%v)", dst.Created, src.Created)
	}
}
// TestIssue38MergeWithOverwrite checks that with overwrite enabled the
// time.Time from src replaces the one already present in dst.
func TestIssue38MergeWithOverwrite(t *testing.T) {
	dst := structWithoutTimePointer{Created: time.Now()}
	fixed := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	src := structWithoutTimePointer{Created: fixed}
	if err := MergeWithOverwrite(&dst, src); err != nil {
		t.Errorf("Error while merging %s", err)
	}
	if dst.Created != src.Created {
		t.Fatalf("Created not merged in properly: dst.Created(%v) != src.Created(%v)", dst.Created, src.Created)
	}
}

View file

@ -61,6 +61,13 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
dstMap[fieldName] = src.Field(i).Interface() dstMap[fieldName] = src.Field(i).Interface()
} }
} }
case reflect.Ptr:
if dst.IsNil() {
v := reflect.New(dst.Type().Elem())
dst.Set(v)
}
dst = dst.Elem()
fallthrough
case reflect.Struct: case reflect.Struct:
srcMap := src.Interface().(map[string]interface{}) srcMap := src.Interface().(map[string]interface{})
for key := range srcMap { for key := range srcMap {
@ -85,6 +92,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
srcKind = reflect.Ptr srcKind = reflect.Ptr
} }
} }
if !srcElement.IsValid() { if !srcElement.IsValid() {
continue continue
} }
@ -92,8 +100,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
return return
} }
} else { } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
if srcKind == reflect.Map { if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
return
}
} else if srcKind == reflect.Map {
if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
return return
} }
@ -102,7 +113,6 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over
} }
} }
} }
}
return return
} }

View file

@ -12,6 +12,18 @@ import (
"reflect" "reflect"
) )
func hasExportedField(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i)
if field.Anonymous {
exported = exported || hasExportedField(dst.Field(i))
} else {
exported = exported || len(field.PkgPath) == 0
}
}
return
}
// Traverses recursively both values, assigning src's fields values to dst. // Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows // The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types. // short circuiting on recursive types.
@ -34,12 +46,22 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
} }
switch dst.Kind() { switch dst.Kind() {
case reflect.Struct: case reflect.Struct:
if hasExportedField(dst) {
for i, n := 0, dst.NumField(); i < n; i++ { for i, n := 0, dst.NumField(); i < n; i++ {
if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
return return
} }
} }
} else {
if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
}
case reflect.Map: case reflect.Map:
if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 {
dst.Set(reflect.MakeMap(dst.Type()))
return
}
for _, key := range src.MapKeys() { for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key) srcElement := src.MapIndex(key)
if !srcElement.IsValid() { if !srcElement.IsValid() {
@ -67,6 +89,10 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
} }
} }
} }
if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
continue
}
if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
if dst.IsNil() { if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type())) dst.Set(reflect.MakeMap(dst.Type()))
@ -77,9 +103,27 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov
case reflect.Ptr: case reflect.Ptr:
fallthrough fallthrough
case reflect.Interface: case reflect.Interface:
if src.Kind() != reflect.Interface {
if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
return
}
} else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, overwrite); err != nil {
return
}
} else {
return ErrDifferentArgumentsTypes
}
break
}
if src.IsNil() { if src.IsNil() {
break break
} else if dst.IsNil() { } else if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
dst.Set(src) dst.Set(src)
} }

View file

@ -45,7 +45,7 @@ func isEmptyValue(v reflect.Value) bool {
return v.Uint() == 0 return v.Uint() == 0
case reflect.Float32, reflect.Float64: case reflect.Float32, reflect.Float64:
return v.Float() == 0 return v.Float() == 0
case reflect.Interface, reflect.Ptr: case reflect.Interface, reflect.Ptr, reflect.Func:
return v.IsNil() return v.IsNil()
} }
return false return false

View file

@ -6,12 +6,11 @@
package mergo package mergo
import ( import (
"gopkg.in/yaml.v2"
"io/ioutil" "io/ioutil"
"reflect" "reflect"
"testing" "testing"
"time" "time"
"gopkg.in/yaml.v1"
) )
type simpleTest struct { type simpleTest struct {
@ -24,6 +23,14 @@ type complexTest struct {
ID string ID string
} }
// mapTest wraps a single map field so map-merging behavior can be exercised.
type mapTest struct {
	M map[int]int
}
// ifcTest wraps a bare interface{} field for the Map/Merge interface tests.
type ifcTest struct {
	I interface{}
}
type moreComplextText struct { type moreComplextText struct {
Ct complexTest Ct complexTest
St simpleTest St simpleTest
@ -244,6 +251,50 @@ func TestSliceStruct(t *testing.T) {
} }
} }
// TestEmptyMaps checks that merging an empty-but-non-nil map into a zero
// struct leaves the two structs deeply equal (dst ends up with an
// initialized empty map, since DeepEqual distinguishes nil from empty).
func TestEmptyMaps(t *testing.T) {
	var dst mapTest
	src := mapTest{M: map[int]int{}}
	if err := Merge(&dst, src); err != nil {
		t.Fail()
	}
	if !reflect.DeepEqual(dst, src) {
		t.FailNow()
	}
}
// TestEmptyToEmptyMaps checks that merging two zero structs (both maps
// nil) is a no-op and the structs stay deeply equal.
func TestEmptyToEmptyMaps(t *testing.T) {
	var dst, src mapTest
	if err := Merge(&dst, src); err != nil {
		t.Fail()
	}
	if !reflect.DeepEqual(dst, src) {
		t.FailNow()
	}
}
// TestEmptyToNotEmptyMaps checks that merging an empty map into a
// populated one leaves the destination's entries unchanged.
func TestEmptyToNotEmptyMaps(t *testing.T) {
	dst := mapTest{M: map[int]int{1: 2, 3: 4}}
	want := mapTest{M: map[int]int{1: 2, 3: 4}}
	src := mapTest{M: map[int]int{}}
	if err := Merge(&dst, src); err != nil {
		t.Fail()
	}
	if !reflect.DeepEqual(dst, want) {
		t.FailNow()
	}
}
func TestMapsWithOverwrite(t *testing.T) { func TestMapsWithOverwrite(t *testing.T) {
m := map[string]simpleTest{ m := map[string]simpleTest{
"a": {}, // overwritten by 16 "a": {}, // overwritten by 16
@ -318,7 +369,8 @@ func TestYAMLMaps(t *testing.T) {
license := loadYAML("testdata/license.yml") license := loadYAML("testdata/license.yml")
ft := thing["fields"].(map[interface{}]interface{}) ft := thing["fields"].(map[interface{}]interface{})
fl := license["fields"].(map[interface{}]interface{}) fl := license["fields"].(map[interface{}]interface{})
expectedLength := len(ft) + len(fl) // license has one extra field (site) and another already existing in thing (author) that Mergo won't override.
expectedLength := len(ft) + len(fl) - 1
if err := Merge(&license, thing); err != nil { if err := Merge(&license, thing); err != nil {
t.Fatal(err.Error()) t.Fatal(err.Error())
} }
@ -393,6 +445,45 @@ func TestSimpleMap(t *testing.T) {
} }
} }
// TestIfcMap checks that Map copies an interface-typed field into a zero
// destination.
func TestIfcMap(t *testing.T) {
	var dst ifcTest
	src := ifcTest{I: 42}
	if err := Map(&dst, src); err != nil {
		t.FailNow()
	}
	if dst.I != 42 {
		t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", dst.I, src.I)
	}
	if !reflect.DeepEqual(dst, src) {
		t.FailNow()
	}
}
// TestIfcMapNoOverwrite checks that without overwrite, a non-zero
// interface field already set in dst is kept.
func TestIfcMapNoOverwrite(t *testing.T) {
	dst := ifcTest{I: 13}
	src := ifcTest{I: 42}
	if err := Map(&dst, src); err != nil {
		t.FailNow()
	}
	if dst.I != 13 {
		t.Fatalf("a not left alone: a.I(%d) == b.I(%d)", dst.I, src.I)
	}
}
// TestIfcMapWithOverwrite checks that with overwrite enabled, src's
// interface value replaces the one in dst.
func TestIfcMapWithOverwrite(t *testing.T) {
	dst := ifcTest{I: 13}
	src := ifcTest{I: 42}
	if err := MapWithOverwrite(&dst, src); err != nil {
		t.FailNow()
	}
	if dst.I != 42 {
		t.Fatalf("b not merged in properly: a.I(%d) != b.I(%d)", dst.I, src.I)
	}
	if !reflect.DeepEqual(dst, src) {
		t.FailNow()
	}
}
type pointerMapTest struct { type pointerMapTest struct {
A int A int
hidden int hidden int
@ -434,6 +525,29 @@ func TestBackAndForth(t *testing.T) {
} }
} }
// TestEmbeddedPointerUnpacking checks that MapWithOverwrite writes a
// mapped value through a struct-pointer field, for both a nil pointer
// (which must be allocated) and an already-populated one.
func TestEmbeddedPointerUnpacking(t *testing.T) {
	const updated = 77
	payload := map[string]interface{}{
		"b": map[string]interface{}{
			"value": updated,
		},
	}
	cases := []pointerMapTest{
		{42, 1, nil},
		{42, 1, &simpleTest{66}},
	}
	for _, tc := range cases {
		if err := MapWithOverwrite(&tc, payload); err != nil {
			t.FailNow()
		}
		if tc.B.Value != updated {
			t.Fatalf("pt not mapped properly: pt.A.Value(%d) != m[`b`][`value`](%d)", tc.B.Value, updated)
		}
	}
}
type structWithTimePointer struct { type structWithTimePointer struct {
Birth *time.Time Birth *time.Time
} }
@ -523,3 +637,26 @@ func TestUnexportedProperty(t *testing.T) {
}() }()
Merge(&a, b) Merge(&a, b)
} }
// structWithBoolPointer wraps a *bool so merging through pointers to
// scalar values can be checked.
type structWithBoolPointer struct {
	C *bool
}
// TestBooleanPointer checks that Merge copies through the bool pointer:
// after merging, dst keeps its own (distinct) pointer but holds src's
// pointed-to value.
func TestBooleanPointer(t *testing.T) {
	srcVal, dstVal := true, false
	src := structWithBoolPointer{C: &srcVal}
	dst := structWithBoolPointer{C: &dstVal}
	if err := Merge(&dst, src); err != nil {
		t.FailNow()
	}
	if dst.C == src.C {
		t.Fatalf("dst.C should be a different pointer than src.C")
	}
	if *dst.C != *src.C {
		t.Fatalf("dst.C should be true")
	}
}

View file

@ -14,43 +14,62 @@ import (
"time" "time"
) )
// The algorithm that this implementation uses does computational work
// only when tokens are removed from the bucket, and that work completes
// in short, bounded-constant time (Bucket.Wait benchmarks at 175ns on
// my laptop).
//
// Time is measured in equal measured ticks, a given interval
// (fillInterval) apart. On each tick a number of tokens (quantum) are
// added to the bucket.
//
// When any of the methods are called the bucket updates the number of
// tokens that are in the bucket, and it records the current tick
// number too. Note that it doesn't record the current time - by
// keeping things in units of whole ticks, it's easy to dish out tokens
// at exactly the right intervals as measured from the start time.
//
// This allows us to calculate the number of tokens that will be
// available at some time in the future with a few simple arithmetic
// operations.
//
// The main reason for being able to transfer multiple tokens on each tick
// is so that we can represent rates greater than 1e9 (the resolution of the Go
// time package) tokens per second, but it's also useful because
// it means we can easily represent situations like "a person gets
// five tokens an hour, replenished on the hour".
// Bucket represents a token bucket that fills at a predetermined rate. // Bucket represents a token bucket that fills at a predetermined rate.
// Methods on Bucket may be called concurrently. // Methods on Bucket may be called concurrently.
type Bucket struct { type Bucket struct {
startTime time.Time
capacity int64
quantum int64
fillInterval time.Duration
clock Clock clock Clock
// The mutex guards the fields following it. // startTime holds the moment when the bucket was
// first created and ticks began.
startTime time.Time
// capacity holds the overall capacity of the bucket.
capacity int64
// quantum holds how many tokens are added on
// each tick.
quantum int64
// fillInterval holds the interval between each tick.
fillInterval time.Duration
// mu guards the fields below it.
mu sync.Mutex mu sync.Mutex
// avail holds the number of available tokens // availableTokens holds the number of available
// in the bucket, as of availTick ticks from startTime. // tokens as of the associated latestTick.
// It will be negative when there are consumers // It will be negative when there are consumers
// waiting for tokens. // waiting for tokens.
avail int64 availableTokens int64
availTick int64
}
// Clock is used to inject testable fakes. // latestTick holds the latest tick for which
type Clock interface { // we know the number of tokens in the bucket.
Now() time.Time latestTick int64
Sleep(d time.Duration)
}
// realClock implements Clock in terms of standard time functions.
type realClock struct{}
// Now is identical to time.Now.
func (realClock) Now() time.Time {
return time.Now()
}
// Sleep is identical to time.Sleep.
func (realClock) Sleep(d time.Duration) {
time.Sleep(d)
} }
// NewBucket returns a new token bucket that fills at the // NewBucket returns a new token bucket that fills at the
@ -58,7 +77,7 @@ func (realClock) Sleep(d time.Duration) {
// maximum capacity. Both arguments must be // maximum capacity. Both arguments must be
// positive. The bucket is initially full. // positive. The bucket is initially full.
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket { func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
return NewBucketWithClock(fillInterval, capacity, realClock{}) return NewBucketWithClock(fillInterval, capacity, nil)
} }
// NewBucketWithClock is identical to NewBucket but injects a testable clock // NewBucketWithClock is identical to NewBucket but injects a testable clock
@ -77,18 +96,22 @@ const rateMargin = 0.01
// at high rates, the actual rate may be up to 1% different from the // at high rates, the actual rate may be up to 1% different from the
// specified rate. // specified rate.
func NewBucketWithRate(rate float64, capacity int64) *Bucket { func NewBucketWithRate(rate float64, capacity int64) *Bucket {
return NewBucketWithRateAndClock(rate, capacity, realClock{}) return NewBucketWithRateAndClock(rate, capacity, nil)
} }
// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a // NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
// testable clock interface. // testable clock interface.
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket { func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
// Use the same bucket each time through the loop
// to save allocations.
tb := NewBucketWithQuantumAndClock(1, capacity, 1, clock)
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) { for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
fillInterval := time.Duration(1e9 * float64(quantum) / rate) fillInterval := time.Duration(1e9 * float64(quantum) / rate)
if fillInterval <= 0 { if fillInterval <= 0 {
continue continue
} }
tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock) tb.fillInterval = fillInterval
tb.quantum = quantum
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin { if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb return tb
} }
@ -111,12 +134,16 @@ func nextQuantum(q int64) int64 {
// the specification of the quantum size - quantum tokens // the specification of the quantum size - quantum tokens
// are added every fillInterval. // are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket { func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{}) return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, nil)
} }
// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects // NewBucketWithQuantumAndClock is like NewBucketWithQuantum, but
// a testable clock interface. // also has a clock argument that allows clients to fake the passing
// of time. If clock is nil, the system clock will be used.
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket { func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
if clock == nil {
clock = realClock{}
}
if fillInterval <= 0 { if fillInterval <= 0 {
panic("token bucket fill interval is not > 0") panic("token bucket fill interval is not > 0")
} }
@ -129,10 +156,11 @@ func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum
return &Bucket{ return &Bucket{
clock: clock, clock: clock,
startTime: clock.Now(), startTime: clock.Now(),
latestTick: 0,
fillInterval: fillInterval,
capacity: capacity, capacity: capacity,
quantum: quantum, quantum: quantum,
avail: capacity, availableTokens: capacity,
fillInterval: fillInterval,
} }
} }
@ -166,6 +194,8 @@ const infinityDuration time.Duration = 0x7fffffffffffffff
// Note that if the request is irrevocable - there is no way to return // Note that if the request is irrevocable - there is no way to return
// tokens to the bucket once this method commits us to taking them. // tokens to the bucket once this method commits us to taking them.
func (tb *Bucket) Take(count int64) time.Duration { func (tb *Bucket) Take(count int64) time.Duration {
tb.mu.Lock()
defer tb.mu.Unlock()
d, _ := tb.take(tb.clock.Now(), count, infinityDuration) d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
return d return d
} }
@ -180,6 +210,8 @@ func (tb *Bucket) Take(count int64) time.Duration {
// wait until the tokens are actually available, and reports // wait until the tokens are actually available, and reports
// true. // true.
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) { func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
tb.mu.Lock()
defer tb.mu.Unlock()
return tb.take(tb.clock.Now(), count, maxWait) return tb.take(tb.clock.Now(), count, maxWait)
} }
@ -187,6 +219,8 @@ func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Dura
// bucket. It returns the number of tokens removed, or zero if there are // bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block. // no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 { func (tb *Bucket) TakeAvailable(count int64) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
return tb.takeAvailable(tb.clock.Now(), count) return tb.takeAvailable(tb.clock.Now(), count)
} }
@ -196,17 +230,14 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
if count <= 0 { if count <= 0 {
return 0 return 0
} }
tb.mu.Lock() tb.adjustavailableTokens(tb.currentTick(now))
defer tb.mu.Unlock() if tb.availableTokens <= 0 {
tb.adjust(now)
if tb.avail <= 0 {
return 0 return 0
} }
if count > tb.avail { if count > tb.availableTokens {
count = tb.avail count = tb.availableTokens
} }
tb.avail -= count tb.availableTokens -= count
return count return count
} }
@ -225,8 +256,8 @@ func (tb *Bucket) Available() int64 {
func (tb *Bucket) available(now time.Time) int64 { func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock() tb.mu.Lock()
defer tb.mu.Unlock() defer tb.mu.Unlock()
tb.adjust(now) tb.adjustavailableTokens(tb.currentTick(now))
return tb.avail return tb.availableTokens
} }
// Capacity returns the capacity that the bucket was created with. // Capacity returns the capacity that the bucket was created with.
@ -245,40 +276,69 @@ func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.
if count <= 0 { if count <= 0 {
return 0, true return 0, true
} }
tb.mu.Lock()
defer tb.mu.Unlock()
currentTick := tb.adjust(now) tick := tb.currentTick(now)
avail := tb.avail - count tb.adjustavailableTokens(tick)
avail := tb.availableTokens - count
if avail >= 0 { if avail >= 0 {
tb.avail = avail tb.availableTokens = avail
return 0, true return 0, true
} }
// Round up the missing tokens to the nearest multiple // Round up the missing tokens to the nearest multiple
// of quantum - the tokens won't be available until // of quantum - the tokens won't be available until
// that tick. // that tick.
endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
// endTick holds the tick when all the requested tokens will
// become available.
endTick := tick + (-avail+tb.quantum-1)/tb.quantum
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval) endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
waitTime := endTime.Sub(now) waitTime := endTime.Sub(now)
if waitTime > maxWait { if waitTime > maxWait {
return 0, false return 0, false
} }
tb.avail = avail tb.availableTokens = avail
return waitTime, true return waitTime, true
} }
// adjust adjusts the current bucket capacity based on the current time. // currentTick returns the current time tick, measured
// It returns the current tick. // from tb.startTime.
func (tb *Bucket) adjust(now time.Time) (currentTick int64) { func (tb *Bucket) currentTick(now time.Time) int64 {
currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval) return int64(now.Sub(tb.startTime) / tb.fillInterval)
}
if tb.avail >= tb.capacity { // adjustavailableTokens adjusts the current number of tokens
// available in the bucket at the given time, which must
// be in the future (positive) with respect to tb.latestTick.
func (tb *Bucket) adjustavailableTokens(tick int64) {
if tb.availableTokens >= tb.capacity {
return return
} }
tb.avail += (currentTick - tb.availTick) * tb.quantum tb.availableTokens += (tick - tb.latestTick) * tb.quantum
if tb.avail > tb.capacity { if tb.availableTokens > tb.capacity {
tb.avail = tb.capacity tb.availableTokens = tb.capacity
} }
tb.availTick = currentTick tb.latestTick = tick
return return
} }
// Clock represents the passage of time in a way that
// can be faked out for tests. The constructors in this
// package accept a nil Clock, in which case the system
// clock is used.
type Clock interface {
	// Now returns the current time.
	Now() time.Time
	// Sleep sleeps for at least the given duration.
	Sleep(d time.Duration)
}
// realClock implements Clock in terms of standard time functions.
type realClock struct{}

// Now implements Clock.Now by calling time.Now.
func (realClock) Now() time.Time {
	return time.Now()
}

// Sleep implements Clock.Sleep by calling time.Sleep.
func (realClock) Sleep(d time.Duration) {
	time.Sleep(d)
}

View file

@ -344,7 +344,7 @@ func checkRate(c *gc.C, rate float64) {
} }
} }
func (rateLimitSuite) TestNewWithRate(c *gc.C) { func (rateLimitSuite) NewBucketWithRate(c *gc.C) {
for rate := float64(1); rate < 1e6; rate += 7 { for rate := float64(1); rate < 1e6; rate += 7 {
checkRate(c, rate) checkRate(c, rate)
} }
@ -357,6 +357,7 @@ func (rateLimitSuite) TestNewWithRate(c *gc.C) {
0.9e8, 0.9e8,
3e12, 3e12,
4e18, 4e18,
float64(1<<63 - 1),
} { } {
checkRate(c, rate) checkRate(c, rate)
checkRate(c, rate/3) checkRate(c, rate/3)
@ -387,3 +388,9 @@ func BenchmarkWait(b *testing.B) {
tb.Wait(1) tb.Wait(1)
} }
} }
// BenchmarkNewBucket measures the cost of constructing a bucket
// with a very large fill rate and capacity.
func BenchmarkNewBucket(b *testing.B) {
	for i := 0; i < b.N; i++ {
		NewBucketWithRate(4e18, 1<<62)
	}
}

Some files were not shown because too many files have changed in this diff Show more