Update apache/thrift to 0.11.0 and remove pinning (#1317)

The `apache/thrift` project released a new version, `0.11.0`,
several days ago. This release is compatible with the other
packages we depend on, so there is no longer a need to pin
`apache/thrift` to `master` in Gopkg.toml.

This fix removes the `apache/thrift` pin from Gopkg.toml
and updates all of coredns's dependencies.
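For reference, the override being dropped is the one shown in the Gopkg.toml hunk below; now that `0.11.0` is tagged upstream, dep can resolve `apache/thrift` through its normal constraint solving instead of tracking the tip of `master`:

```toml
# Override removed from Gopkg.toml by this change: it forced dep to
# vendor apache/thrift from master rather than a tagged release.
[[override]]
  name = "github.com/apache/thrift"
  branch = "master"
```

The refreshed Gopkg.lock and vendor tree are what a `dep ensure -update` run produces once the override is gone.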

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang authored 2017-12-18 11:50:56 -06:00, committed by GitHub
parent ba4e77672c
commit 4dd40a292c
6992 changed files with 30842 additions and 1995023 deletions

Gopkg.lock generated

@@ -16,14 +16,14 @@
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "240fd146ce68bcafb034cc5dc977229ffbafa8ea"
version = "v1.14.0"
revision = "3b1b38866a79f06deddf0487d5c27ba0697ccd65"
version = "v1.15.0"
[[projects]]
branch = "master"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
revision = "95d5fb3a1e38125b9eabcbe9cda1a6c7bbe3e93d"
revision = "327ebb6c2b6df8bf075da02ef45a2a034e9b79ba"
version = "0.11.0"
[[projects]]
name = "github.com/asaskevich/govalidator"
@@ -40,8 +40,8 @@
[[projects]]
name = "github.com/coreos/etcd"
packages = ["client","pkg/pathutil","pkg/srv","pkg/types","version"]
revision = "f1d7dd87da3e8feab4aaf675b8e29c6a5ed5f58b"
version = "v3.2.9"
revision = "1e1dbb23924672c6cd72c62ee0db2b45f778da71"
version = "v3.2.11"
[[projects]]
name = "github.com/coreos/go-semver"
@@ -82,8 +82,8 @@
[[projects]]
name = "github.com/eapache/queue"
packages = ["."]
revision = "ded5959c0d4e360646dc9e9908cff48666781367"
version = "v1.0.2"
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
name = "github.com/emicklei/go-restful"
@@ -119,7 +119,7 @@
branch = "master"
name = "github.com/go-openapi/analysis"
packages = ["."]
revision = "8ed83f2ea9f00f945516462951a288eaa68bf0d6"
revision = "2bbaa248df987bc6fbd0210da05071e111dec40c"
[[projects]]
branch = "master"
@@ -143,13 +143,13 @@
branch = "master"
name = "github.com/go-openapi/loads"
packages = ["."]
revision = "c3e1ca4c0b6160cac10aeef7e8b425cc95b9c820"
revision = "2a2b323bab96e6b1fdee110e57d959322446e9c9"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "a4fa9574c7aa73b2fc54e251eb9524d0482bb592"
revision = "01738944bdee0f26bf66420c5b17d54cfdf55341"
[[projects]]
branch = "master"
@@ -179,7 +179,7 @@
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9"
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
[[projects]]
branch = "master"
@@ -197,7 +197,7 @@
branch = "master"
name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"]
revision = "01f8541d537215b2867e2745a1eb85c58c7c6b81"
revision = "0e7658f8ee99ee5aa683e2a032b8880091b7a055"
[[projects]]
branch = "master"
@@ -233,7 +233,7 @@
branch = "master"
name = "github.com/mailru/easyjson"
packages = ["buffer","jlexer","jwriter"]
revision = "5f62e4f3afa2f576dc86531b7df4d966b19ef8f8"
revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
@@ -287,25 +287,25 @@
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
revision = "e3fb1a1acd7605367a2b378bc2e2f893c05174b7"
revision = "2e54d0b93cba2fd133edc32211dcc32c06ef72ca"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [".","xfs"]
revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa"
revision = "f98634e408857669d61064b283c4cde240622865"
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
revision = "e181e095bae94582363434144c61a9653aff6e50"
[[projects]]
name = "github.com/spf13/pflag"
@@ -322,31 +322,31 @@
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "6a293f2d4b14b8e6d3f0539e383f6d0d30fce3fd"
revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "1e2299c37cc91a509f1b12369872d27be0ce98a6"
revision = "571f7bbbe08da2a8955aed9d4db316e78630e9a3"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm","width"]
revision = "88f656faf3f37f690df1a32515b479415e1a6769"
revision = "d5a9226ed7dd70cade6ccae9d37517fe14dd9fee"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "11c7f9e547da6db876260ce49ea7536985904c9b"
revision = "a8101f21cf983e773d0c1133ebc5424792003214"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","stats","status","tap","transport"]
revision = "5ffe3083946d5603a0578721101dc8165b1d5b5f"
version = "v1.7.2"
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "e687fa4e6424368ece6e4fe727cea2c806a0fcb4"
version = "v1.8.2"
[[projects]]
name = "gopkg.in/inf.v0"
@@ -364,7 +364,7 @@
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
revision = "287cf08546ab5e7e37d55a84f7ed3fd1db036de5"
[[projects]]
name = "k8s.io/apimachinery"
@@ -380,6 +380,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "802b3c477e5c45a1fc7283cfc3e9ed03c4dffb4bfe9fb406d252a8d8194975d1"
inputs-digest = "f6880c7a6161931b90bc7bde132559b79d0a192c1879b3f095107391e2e45c14"
solver-name = "gps-cdcl"
solver-version = 1


@@ -13,10 +13,6 @@ ignored = [
"golang.org/x/net/trace",
]
[[override]]
name = "github.com/apache/thrift"
branch = "master"
[[override]]
name = "github.com/ugorji/go"
revision = "8c0409fcbb70099c748d71f714529204975f6c3f"


@@ -1,9 +0,0 @@
PASS
BenchmarkSafe 500000 6131 ns/op
BenchmarkUsuallySafe 200000 7864 ns/op
BenchmarkUnsafe 100000 28560 ns/op
BenchmarkAllDWORD 50000 38722 ns/op
BenchmarkAllOctal 50000 40941 ns/op
BenchmarkAllHex 50000 44063 ns/op
BenchmarkAllCombined 50000 33613 ns/op
ok github.com/PuerkitoBio/purell 17.404s


@@ -1,31 +0,0 @@
# Contributing
Contributions are always welcome, both reporting issues and submitting pull requests!
### Reporting issues
Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
Also, please include the following information about your environment, so we can help you faster:
- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?
### Submitting pull requests
We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.
- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.


@@ -1,20 +0,0 @@
##### Versions
*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.*
Sarama Version:
Kafka Version:
Go Version:
##### Configuration
What configuration values are you using for Sarama and Kafka?
##### Logs
When filing an issue please provide logs from Sarama and Kafka if at all
possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug
output.
##### Problem Description


@@ -12,9 +12,9 @@ env:
- KAFKA_HOSTNAME=localhost
- DEBUG=true
matrix:
- KAFKA_VERSION=0.9.0.1
- KAFKA_VERSION=0.10.2.1
- KAFKA_VERSION=0.11.0.1
- KAFKA_VERSION=0.11.0.2
- KAFKA_VERSION=1.0.0
before_install:
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}


@@ -1,5 +1,31 @@
# Changelog
#### Version 1.15.0 (2017-12-08)
New Features:
- Claim official support for Kafka 1.0, though it did already work
([#984](https://github.com/Shopify/sarama/pull/984)).
- Helper methods for Kafka version numbers to/from strings
([#989](https://github.com/Shopify/sarama/pull/989)).
- Implement CreatePartitions request/response
([#985](https://github.com/Shopify/sarama/pull/985)).
Improvements:
- Add error codes 45-60
([#986](https://github.com/Shopify/sarama/issues/986)).
Bug Fixes:
- Fix slow consuming for certain Kafka 0.11/1.0 configurations
([#982](https://github.com/Shopify/sarama/pull/982)).
- Correctly determine when a FetchResponse contains the new message format
([#990](https://github.com/Shopify/sarama/pull/990)).
- Fix producing with multiple headers
([#996](https://github.com/Shopify/sarama/pull/996)).
- Fix handling of truncated record batches
([#998](https://github.com/Shopify/sarama/pull/998)).
- Fix leaking metrics when closing brokers
([#991](https://github.com/Shopify/sarama/pull/991)).
#### Version 1.14.0 (2017-11-13)
New Features:


@@ -21,7 +21,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.9 through 1.7, and Kafka 0.11 through 0.9, although older releases are
Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.


@@ -178,6 +178,13 @@ func (b *Broker) Close() error {
b.done = nil
b.responses = nil
if b.id >= 0 {
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
}
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {

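The `Unregister` calls added to `Broker.Close` above are the "fix leaking metrics when closing brokers" change ([#991](https://github.com/Shopify/sarama/pull/991)) from the 1.15.0 changelog: per-broker meters are created in the client's `MetricRegistry` when a connection is opened, so closing a broker without removing them leaves one stale set of entries behind per broker ever seen. A minimal sketch of the pattern, using the `rcrowley/go-metrics` package Sarama builds on (the metric name format below is illustrative, not Sarama's actual `getMetricNameForBroker` helper):

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	registry := metrics.NewRegistry()

	// Register a meter scoped to a single broker, as happens when a
	// broker connection is opened (the name format is hypothetical).
	name := fmt.Sprintf("incoming-byte-rate-for-broker-%d", 42)
	metrics.GetOrRegisterMeter(name, registry)

	// Without a matching Unregister on close, this entry would stay in
	// the registry for the lifetime of the client.
	registry.Unregister(name)
}
```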

@@ -77,10 +77,6 @@ func TestSimpleBrokerCommunication(t *testing.T) {
t.Fatal(err)
}
tt.runner(t, broker)
err = broker.Close()
if err != nil {
t.Error(err)
}
// Wait up to 500 ms for the remote broker to process the request and
// notify us about the metrics
timeout := 500 * time.Millisecond
@@ -91,6 +87,10 @@
t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
}
mb.Close()
err = broker.Close()
if err != nil {
t.Error(err)
}
}
}


@@ -519,11 +519,10 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe
return messages, nil
}
func (child *partitionConsumer) parseRecords(block *FetchResponseBlock) ([]*ConsumerMessage, error) {
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
var incomplete bool
prelude := true
batch := block.Records.recordBatch
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
@@ -546,11 +545,6 @@ func (child *partitionConsumer) parseRecords(block *FetchResponseBlock) ([]*Cons
} else {
incomplete = true
}
if child.offset > block.LastStableOffset {
// We reached the end of closed transactions
break
}
}
if incomplete || len(messages) == 0 {
@@ -604,10 +598,10 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
return nil, err
}
if response.Version < 4 {
if block.Records.recordsType == legacyRecords {
return child.parseMessages(block.Records.msgSet)
}
return child.parseRecords(block)
return child.parseRecords(block.Records.recordBatch)
}
// brokerConsumer


@@ -435,6 +435,49 @@ func TestConsumerExtraOffsets(t *testing.T) {
}
}
func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
// Given
fetchResponse1 := &FetchResponse{Version: 4}
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
cfg := NewConfig()
cfg.Version = V0_11_0_0
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{}
fetchResponse2.Version = 4
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(1).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-consumer.Messages(), 1)
assertMessageOffset(t, <-consumer.Messages(), 2)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// It is fine if offsets of fetched messages are not sequential (although
// strictly increasing!).
func TestConsumerNonSequentialOffsets(t *testing.T) {


@@ -0,0 +1,121 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,50 @@
package sarama
import (
"testing"
"time"
)
var (
createPartitionRequestNoAssignment = []byte{
0, 0, 0, 1, // one topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
255, 255, 255, 255, // no assignments
0, 0, 0, 100, // timeout
0, // validate only = false
}
createPartitionRequestAssignment = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
0, 0, 0, 2,
0, 0, 0, 2,
0, 0, 0, 2, 0, 0, 0, 3,
0, 0, 0, 2,
0, 0, 0, 3, 0, 0, 0, 1,
0, 0, 0, 100,
1, // validate only = true
}
)
func TestCreatePartitionsRequest(t *testing.T) {
req := &CreatePartitionsRequest{
TopicPartitions: map[string]*TopicPartition{
"topic": &TopicPartition{
Count: 3,
},
},
Timeout: 100 * time.Millisecond,
}
buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment)
testRequestDecode(t, "no assignment", req, buf)
req.ValidateOnly = true
req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}}
buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment)
testRequestDecode(t, "assignment", req, buf)
}


@@ -0,0 +1,94 @@
package sarama
import "time"
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}


@@ -0,0 +1,52 @@
package sarama
import (
"reflect"
"testing"
"time"
)
var (
createPartitionResponseSuccess = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, // no error
255, 255, // no error message
}
createPartitionResponseFail = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 37, // partition error
0, 5, 'e', 'r', 'r', 'o', 'r',
}
)
func TestCreatePartitionsResponse(t *testing.T) {
resp := &CreatePartitionsResponse{
ThrottleTime: 100 * time.Millisecond,
TopicPartitionErrors: map[string]*TopicPartitionError{
"topic": &TopicPartitionError{},
},
}
testResponse(t, "success", resp, createPartitionResponseSuccess)
decodedresp := new(CreatePartitionsResponse)
testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
errMsg := "error"
resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions
resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg
testResponse(t, "with errors", resp, createPartitionResponseFail)
decodedresp = new(CreatePartitionsResponse)
testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
}


@@ -2,13 +2,9 @@ name: sarama
up:
- go:
version: '1.8'
version: '1.9'
commands:
test:
run: make test
desc: 'run unit tests'
packages:
- git@github.com:Shopify/dev-shopify.git


@@ -71,52 +71,68 @@ type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
)
func (err KError) Error() string {
@@ -215,6 +231,38 @@ func (err KError) Error() string {
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)


@@ -1,9 +0,0 @@
# Sarama examples
This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
#### HTTP server
[http_server](./http_server) is a simple HTTP server that uses the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.


@@ -1,2 +0,0 @@
http_server
http_server.test


@@ -1,7 +0,0 @@
# HTTP server example
This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and sends a 200 result if that succeeds. For every request, it will also send an access log entry to Kafka in the background.
If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response while the message is being produced in the background.
One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.


@@ -1,247 +0,0 @@
package main
import (
"github.com/Shopify/sarama"
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
var (
addr = flag.String("addr", ":8080", "The address to bind to")
brokers = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
verbose = flag.Bool("verbose", false, "Turn on Sarama logging")
certFile = flag.String("certificate", "", "The optional certificate file for client authentication")
keyFile = flag.String("key", "", "The optional key file for client authentication")
caFile = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
)
func main() {
flag.Parse()
if *verbose {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
if *brokers == "" {
flag.PrintDefaults()
os.Exit(1)
}
brokerList := strings.Split(*brokers, ",")
log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
server := &Server{
DataCollector: newDataCollector(brokerList),
AccessLogProducer: newAccessLogProducer(brokerList),
}
defer func() {
if err := server.Close(); err != nil {
log.Println("Failed to close server", err)
}
}()
log.Fatal(server.Run(*addr))
}
func createTlsConfiguration() (t *tls.Config) {
if *certFile != "" && *keyFile != "" && *caFile != "" {
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
log.Fatal(err)
}
caCert, err := ioutil.ReadFile(*caFile)
if err != nil {
log.Fatal(err)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
t = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: caCertPool,
InsecureSkipVerify: *verifySsl,
}
}
// will be nil by default if nothing is provided
return t
}
type Server struct {
DataCollector sarama.SyncProducer
AccessLogProducer sarama.AsyncProducer
}
func (s *Server) Close() error {
if err := s.DataCollector.Close(); err != nil {
log.Println("Failed to shut down data collector cleanly", err)
}
if err := s.AccessLogProducer.Close(); err != nil {
log.Println("Failed to shut down access log producer cleanly", err)
}
return nil
}
func (s *Server) Handler() http.Handler {
return s.withAccessLog(s.collectQueryStringData())
}
func (s *Server) Run(addr string) error {
httpServer := &http.Server{
Addr: addr,
Handler: s.Handler(),
}
log.Printf("Listening for requests on %s...\n", addr)
return httpServer.ListenAndServe()
}
func (s *Server) collectQueryStringData() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
// We are not setting a message key, which means that all messages will
// be distributed randomly over the different partitions.
partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
Topic: "important",
Value: sarama.StringEncoder(r.URL.RawQuery),
})
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Failed to store your data:, %s", err)
} else {
// The tuple (topic, partition, offset) can be used as a unique identifier
// for a message in a Kafka cluster.
fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
}
})
}
type accessLogEntry struct {
Method string `json:"method"`
Host string `json:"host"`
Path string `json:"path"`
IP string `json:"ip"`
ResponseTime float64 `json:"response_time"`
encoded []byte
err error
}
func (ale *accessLogEntry) ensureEncoded() {
if ale.encoded == nil && ale.err == nil {
ale.encoded, ale.err = json.Marshal(ale)
}
}
func (ale *accessLogEntry) Length() int {
ale.ensureEncoded()
return len(ale.encoded)
}
func (ale *accessLogEntry) Encode() ([]byte, error) {
ale.ensureEncoded()
return ale.encoded, ale.err
}
func (s *Server) withAccessLog(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
started := time.Now()
next.ServeHTTP(w, r)
entry := &accessLogEntry{
Method: r.Method,
Host: r.Host,
Path: r.RequestURI,
IP: r.RemoteAddr,
ResponseTime: float64(time.Since(started)) / float64(time.Second),
}
// We will use the client's IP address as key. This will cause
// all the access log entries of the same IP address to end up
// on the same partition.
s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
Topic: "access_log",
Key: sarama.StringEncoder(r.RemoteAddr),
Value: entry,
}
})
}
func newDataCollector(brokerList []string) sarama.SyncProducer {
// For the data collector, we are looking for strong consistency semantics.
// Because we don't change the flush settings, sarama will try to produce messages
// as fast as possible to keep latency low.
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message
config.Producer.Return.Successes = true
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Config = tlsConfig
config.Net.TLS.Enable = true
}
// On the broker side, you may want to change the following settings to get
// stronger consistency guarantees:
// - For your broker, set `unclean.leader.election.enable` to false
// - For the topic, you could increase `min.insync.replicas`.
producer, err := sarama.NewSyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
return producer
}
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
// For the access log, we are looking for AP semantics, with high throughput.
// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
config := sarama.NewConfig()
tlsConfig := createTlsConfiguration()
if tlsConfig != nil {
config.Net.TLS.Enable = true
config.Net.TLS.Config = tlsConfig
}
config.Producer.RequiredAcks = sarama.WaitForLocal // Only wait for the leader to ack
config.Producer.Compression = sarama.CompressionSnappy // Compress messages
config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
producer, err := sarama.NewAsyncProducer(brokerList, config)
if err != nil {
log.Fatalln("Failed to start Sarama producer:", err)
}
// We will just log to STDOUT if we're not able to produce messages.
// Note: messages will only be returned here after all retry attempts are exhausted.
go func() {
for err := range producer.Errors() {
log.Println("Failed to write access log entry:", err)
}
}()
return producer
}


@@ -1,109 +0,0 @@
package main
import (
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/Shopify/sarama"
"github.com/Shopify/sarama/mocks"
)
// In normal operation, we expect one access log entry,
// and one data collector entry. Let's assume both will succeed.
// We should return a HTTP 200 status.
func TestCollectSuccessfully(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndSucceed()
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
// Now, use dependency injection to use the mocks.
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
// The Server's Close call is important; it will call Close on
// the two mock producers, which will then validate whether all
// expectations are resolved.
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 200 {
t.Errorf("Expected HTTP status 200, found %d", res.Code)
}
if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
t.Error("Unexpected response body", res.Body)
}
}
// Now, let's see if we handle the case of not being able to produce
// to the data collector properly. In this case we should return a 500 status.
func TestCollectionFailure(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 500 {
t.Errorf("Expected HTTP status 500, found %d", res.Code)
}
}
// We don't expect any data collector calls because the path is wrong,
// so we are not setting any expectations on the dataCollectorMock. It
// will still generate an access log entry though.
func TestWrongPath(t *testing.T) {
dataCollectorMock := mocks.NewSyncProducer(t, nil)
accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
accessLogProducerMock.ExpectInputAndSucceed()
s := &Server{
DataCollector: dataCollectorMock,
AccessLogProducer: accessLogProducerMock,
}
defer safeClose(t, s)
req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
if err != nil {
t.Fatal(err)
}
res := httptest.NewRecorder()
s.Handler().ServeHTTP(res, req)
if res.Code != 404 {
t.Errorf("Expected HTTP status 404, found %d", res.Code)
}
}
func safeClose(t *testing.T, o io.Closer) {
if err := o.Close(); err != nil {
t.Error(err)
}
}


@@ -79,18 +79,11 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error)
if err != nil {
return err
}
var records Records
if version >= 4 {
records = newDefaultRecords(nil)
} else {
records = newLegacyRecords(nil)
}
if recordsSize > 0 {
if err = records.decode(recordsDecoder); err != nil {
if err = b.Records.decode(recordsDecoder); err != nil {
return err
}
}
b.Records = records
return nil
}


@@ -61,8 +61,28 @@ var (
0x06, 0x05, 0x06, 0x07,
0x02,
0x06, 0x08, 0x09, 0x0A,
0x04, 0x0B, 0x0C,
}
0x04, 0x0B, 0x0C}
oneMessageFetchResponseV4 = []byte{
0x00, 0x00, 0x00, 0x00, // ThrottleTime
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0x05, // Partition
0x00, 0x01, // Error
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)
func TestEmptyFetchResponse(t *testing.T) {
@@ -173,3 +193,56 @@
t.Error("Decoding produced incorrect record value.")
}
}
func TestOneMessageFetchResponseV4(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.Records.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
msgBlock := block.Records.msgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
msg := msgBlock.Msg
if msg.Codec != CompressionNone {
t.Error("Decoding produced incorrect message compression.")
}
if msg.Key != nil {
t.Error("Decoding produced message key where there was none.")
}
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
t.Error("Decoding produced incorrect message value.")
}
}


@@ -1,13 +0,0 @@
# sarama/mocks
The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
You can use them to test your sarama applications using dependency injection.
The following mock objects are available:
- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
and the results will be reported to the `*testing.T` object you provided when creating the mock.


@@ -1,174 +0,0 @@
package mocks
import (
"sync"
"github.com/Shopify/sarama"
)
// AsyncProducer implements sarama's Producer interface for testing purposes.
Before you can send messages to its Input channel, you have to set expectations
so it knows how to handle the input; it returns an error if the number of messages
received is bigger than the number of expectations set. You can also set a
// function in each expectation so that the message value is checked by this function
// and an error is returned if the match fails.
type AsyncProducer struct {
l sync.Mutex
t ErrorReporter
expectations []*producerExpectation
closed chan struct{}
input chan *sarama.ProducerMessage
successes chan *sarama.ProducerMessage
errors chan *sarama.ProducerError
lastOffset int64
}
// NewAsyncProducer instantiates a new Producer mock. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is used to determine whether it
// should ack successes on the Successes channel.
func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
if config == nil {
config = sarama.NewConfig()
}
mp := &AsyncProducer{
t: t,
closed: make(chan struct{}, 0),
expectations: make([]*producerExpectation, 0),
input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
errors: make(chan *sarama.ProducerError, config.ChannelBufferSize),
}
go func() {
defer func() {
close(mp.successes)
close(mp.errors)
}()
for msg := range mp.input {
mp.l.Lock()
if mp.expectations == nil || len(mp.expectations) == 0 {
mp.expectations = nil
mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
} else {
expectation := mp.expectations[0]
mp.expectations = mp.expectations[1:]
if expectation.CheckFunction != nil {
if val, err := msg.Value.Encode(); err != nil {
mp.t.Errorf("Input message encoding failed: %s", err.Error())
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
} else {
err = expectation.CheckFunction(val)
if err != nil {
mp.t.Errorf("Check function returned an error: %s", err.Error())
mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
}
}
}
if expectation.Result == errProduceSuccess {
mp.lastOffset++
if config.Producer.Return.Successes {
msg.Offset = mp.lastOffset
mp.successes <- msg
}
} else {
if config.Producer.Return.Errors {
mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
}
}
}
mp.l.Unlock()
}
mp.l.Lock()
if len(mp.expectations) > 0 {
mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
}
mp.l.Unlock()
close(mp.closed)
}()
return mp
}
////////////////////////////////////////////////
// Implement Producer interface
////////////////////////////////////////////////
// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
// By closing a mock producer, you also tell it that no more input will be provided, so it will
// write an error to the test state if there's any remaining expectations.
func (mp *AsyncProducer) AsyncClose() {
close(mp.input)
}
// Close corresponds with the Close method of sarama's Producer implementation.
// By closing a mock producer, you also tell it that no more input will be provided, so it will
// write an error to the test state if there's any remaining expectations.
func (mp *AsyncProducer) Close() error {
mp.AsyncClose()
<-mp.closed
return nil
}
// Input corresponds with the Input method of sarama's Producer implementation.
// You have to set expectations on the mock producer before writing messages to the Input
channel, so it knows how to handle them. If there are no remaining expectations and
a message is written to the Input channel, the mock producer will write an error to the test
// state object.
func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
return mp.input
}
// Successes corresponds with the Successes method of sarama's Producer implementation.
func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
return mp.successes
}
// Errors corresponds with the Errors method of sarama's Producer implementation.
func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
return mp.errors
}
////////////////////////////////////////////////
// Setting expectations
////////////////////////////////////////////////
// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
// will be provided on the input channel. The mock producer will call the given function to check
// the message value. If an error is returned it will be made available on the Errors channel
// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
mp.l.Lock()
defer mp.l.Unlock()
mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
}
// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
// will be provided on the input channel. The mock producer will first call the given function to
// check the message value. If an error is returned it will be made available on the Errors channel
// otherwise the mock will handle the message as if it failed to produce successfully. This means
// it will make a ProducerError available on the Errors channel.
func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
mp.l.Lock()
defer mp.l.Unlock()
mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
}
// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
// on the input channel. The mock producer will handle the message as if it is produced successfully,
// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
// is set to true.
func (mp *AsyncProducer) ExpectInputAndSucceed() {
mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
}
// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
// on the input channel. The mock producer will handle the message as if it failed to produce
// successfully. This means it will make a ProducerError available on the Errors channel.
func (mp *AsyncProducer) ExpectInputAndFail(err error) {
mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
}


@@ -1,132 +0,0 @@
package mocks
import (
"errors"
"fmt"
"regexp"
"strings"
"testing"
"github.com/Shopify/sarama"
)
func generateRegexpChecker(re string) func([]byte) error {
return func(val []byte) error {
matched, err := regexp.MatchString(re, string(val))
if err != nil {
return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
}
if !matched {
return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
}
return nil
}
}
type testReporterMock struct {
errors []string
}
func newTestReporterMock() *testReporterMock {
return &testReporterMock{errors: make([]string, 0)}
}
func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
}
func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
var mp interface{} = &AsyncProducer{}
if _, ok := mp.(sarama.AsyncProducer); !ok {
t.Error("The mock producer should implement the sarama.Producer interface.")
}
}
func TestProducerReturnsExpectationsToChannels(t *testing.T) {
config := sarama.NewConfig()
config.Producer.Return.Successes = true
mp := NewAsyncProducer(t, config)
mp.ExpectInputAndSucceed()
mp.ExpectInputAndSucceed()
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
msg1 := <-mp.Successes()
msg2 := <-mp.Successes()
err1 := <-mp.Errors()
if msg1.Topic != "test 1" {
t.Error("Expected message 1 to be returned first")
}
if msg2.Topic != "test 2" {
t.Error("Expected message 2 to be returned second")
}
if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
t.Error("Expected message 3 to be returned as error")
}
if err := mp.Close(); err != nil {
t.Error(err)
}
}
func TestProducerWithTooFewExpectations(t *testing.T) {
trm := newTestReporterMock()
mp := NewAsyncProducer(trm, nil)
mp.ExpectInputAndSucceed()
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
if err := mp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestProducerWithTooManyExpectations(t *testing.T) {
trm := newTestReporterMock()
mp := NewAsyncProducer(trm, nil)
mp.ExpectInputAndSucceed()
mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
if err := mp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestProducerWithCheckerFunction(t *testing.T) {
trm := newTestReporterMock()
mp := NewAsyncProducer(trm, nil)
mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if err := mp.Close(); err != nil {
t.Error(err)
}
if len(mp.Errors()) != 1 {
t.Error("Expected to report an error")
}
err1 := <-mp.Errors()
if !strings.HasPrefix(err1.Err.Error(), "No match") {
t.Error("Expected to report a value check error, found: ", err1.Err)
}
}


@@ -1,315 +0,0 @@
package mocks
import (
"sync"
"sync/atomic"
"github.com/Shopify/sarama"
)
// Consumer implements sarama's Consumer interface for testing purposes.
// Before you can start consuming from this consumer, you have to register
// topic/partitions using ExpectConsumePartition, and set expectations on them.
type Consumer struct {
l sync.Mutex
t ErrorReporter
config *sarama.Config
partitionConsumers map[string]map[int32]*PartitionConsumer
metadata map[string][]int32
}
// NewConsumer returns a new mock Consumer instance. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument can be set to nil.
func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
if config == nil {
config = sarama.NewConfig()
}
c := &Consumer{
t: t,
config: config,
partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
}
return c
}
///////////////////////////////////////////////////
// Consumer interface implementation
///////////////////////////////////////////////////
// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
// Before you can start consuming a partition, you have to set expectations on it using
// ExpectConsumePartition. You can only consume a partition once per consumer.
func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
c.l.Lock()
defer c.l.Unlock()
if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
c.t.Errorf("No expectations set for %s/%d", topic, partition)
return nil, errOutOfExpectations
}
pc := c.partitionConsumers[topic][partition]
if pc.consumed {
return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
}
if pc.offset != AnyOffset && pc.offset != offset {
c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
}
pc.consumed = true
return pc, nil
}
// Topics returns a list of topics, as registered with SetMetadata
func (c *Consumer) Topics() ([]string, error) {
c.l.Lock()
defer c.l.Unlock()
if c.metadata == nil {
c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
return nil, sarama.ErrOutOfBrokers
}
var result []string
for topic := range c.metadata {
result = append(result, topic)
}
return result, nil
}
// Partitions returns the list of partitions for the given topic, as registered with SetMetadata
func (c *Consumer) Partitions(topic string) ([]int32, error) {
c.l.Lock()
defer c.l.Unlock()
if c.metadata == nil {
c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
return nil, sarama.ErrOutOfBrokers
}
if c.metadata[topic] == nil {
return nil, sarama.ErrUnknownTopicOrPartition
}
return c.metadata[topic], nil
}
func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
c.l.Lock()
defer c.l.Unlock()
hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
for topic, partitionConsumers := range c.partitionConsumers {
hwm := make(map[int32]int64, len(partitionConsumers))
for partition, pc := range partitionConsumers {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
// Close implements the Close method from the sarama.Consumer interface. It will close
// all registered PartitionConsumer instances.
func (c *Consumer) Close() error {
c.l.Lock()
defer c.l.Unlock()
for _, partitions := range c.partitionConsumers {
for _, partitionConsumer := range partitions {
_ = partitionConsumer.Close()
}
}
return nil
}
///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////
// SetTopicMetadata sets the cluster's topic/partition metadata,
// which will be returned by Topics() and Partitions().
func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
c.l.Lock()
defer c.l.Unlock()
c.metadata = metadata
}
// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
// The registered PartitionConsumer will be returned, so you can set expectations
// on it using method chaining. Once a topic/partition is registered, you are
// expected to start consuming it using ConsumePartition. If that doesn't happen,
// an error will be written to the error reporter once the mock consumer is closed. It will
// also expect the offset passed to ConsumePartition to match the offset set in the expectation.
func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
c.l.Lock()
defer c.l.Unlock()
if c.partitionConsumers[topic] == nil {
c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
}
if c.partitionConsumers[topic][partition] == nil {
c.partitionConsumers[topic][partition] = &PartitionConsumer{
t: c.t,
topic: topic,
partition: partition,
offset: offset,
messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
}
}
return c.partitionConsumers[topic][partition]
}
///////////////////////////////////////////////////
// PartitionConsumer mock type
///////////////////////////////////////////////////
// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
// It is returned by the mock Consumer's ConsumePartition method, but only if it is
// registered first using the Consumer's ExpectConsumePartition method. Before consuming from the
// Errors and Messages channels, you should specify what values will be provided on these
// channels using YieldMessage and YieldError.
type PartitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
l sync.Mutex
t ErrorReporter
topic string
partition int32
offset int64
messages chan *sarama.ConsumerMessage
errors chan *sarama.ConsumerError
singleClose sync.Once
consumed bool
errorsShouldBeDrained bool
messagesShouldBeDrained bool
}
///////////////////////////////////////////////////
// PartitionConsumer interface implementation
///////////////////////////////////////////////////
// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) AsyncClose() {
pc.singleClose.Do(func() {
close(pc.messages)
close(pc.errors)
})
}
// Close implements the Close method from the sarama.PartitionConsumer interface. It will
// verify whether the partition consumer was actually started.
func (pc *PartitionConsumer) Close() error {
if !pc.consumed {
pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
return errPartitionConsumerNotStarted
}
if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
}
if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
}
pc.AsyncClose()
var (
closeErr error
wg sync.WaitGroup
)
wg.Add(1)
go func() {
defer wg.Done()
var errs = make(sarama.ConsumerErrors, 0)
for err := range pc.errors {
errs = append(errs, err)
}
if len(errs) > 0 {
closeErr = errs
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for range pc.messages {
// drain
}
}()
wg.Wait()
return closeErr
}
// Errors implements the Errors method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
return pc.errors
}
// Messages implements the Messages method from the sarama.PartitionConsumer interface.
func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
return pc.messages
}
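// HighWaterMarkOffset returns the offset one above that of the most recently
// yielded message, mimicking a real partition consumer's high watermark.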
func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
}
///////////////////////////////////////////////////
// Expectation API
///////////////////////////////////////////////////
// YieldMessage will yield a message on the Messages channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this
// message was consumed from the Messages channel, because there are legitimate
// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will
// verify that the channel is empty on close.
func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
pc.l.Lock()
defer pc.l.Unlock()
msg.Topic = pc.topic
msg.Partition = pc.partition
msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
pc.messages <- msg
}
// YieldError will yield an error on the Errors channel of this partition consumer
// when it is consumed. By default, the mock consumer will not verify whether this error was
// consumed from the Errors channel, because there are legitimate reasons for this
// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
// the channel is empty on close.
func (pc *PartitionConsumer) YieldError(err error) {
pc.errors <- &sarama.ConsumerError{
Topic: pc.topic,
Partition: pc.partition,
Err: err,
}
}
// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
// that the messages channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
pc.messagesShouldBeDrained = true
}
// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
// that the errors channel will be fully drained when Close is called. If this
// expectation is not met, an error is reported to the error reporter.
func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
pc.errorsShouldBeDrained = true
}
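A minimal sketch of the expectation flow described above, assuming the usual testing, sarama and mocks imports; the topic name and payload are illustrative:

func TestPartitionConsumerSketch(t *testing.T) {
	consumer := mocks.NewConsumer(t, nil)
	defer func() {
		if err := consumer.Close(); err != nil {
			t.Error(err)
		}
	}()
	// Register the expectation first; consuming an unregistered partition is reported to t.
	consumer.ExpectConsumePartition("my-topic", 0, sarama.OffsetOldest).
		YieldMessage(&sarama.ConsumerMessage{Value: []byte("payload")})
	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		t.Fatal(err)
	}
	msg := <-pc.Messages()
	if string(msg.Value) != "payload" {
		t.Errorf("unexpected message value: %s", msg.Value)
	}
}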


@ -1,249 +0,0 @@
package mocks
import (
"sort"
"testing"
"github.com/Shopify/sarama"
)
func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
var c interface{} = &Consumer{}
if _, ok := c.(sarama.Consumer); !ok {
t.Error("The mock consumer should implement the sarama.Consumer interface.")
}
var pc interface{} = &PartitionConsumer{}
if _, ok := pc.(sarama.PartitionConsumer); !ok {
t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
}
}
func TestConsumerHandlesExpectations(t *testing.T) {
consumer := NewConsumer(t, nil)
defer func() {
if err := consumer.Close(); err != nil {
t.Error(err)
}
}()
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
test0_msg := <-pc_test0.Messages()
if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
t.Error("Message was not as expected:", test0_msg)
}
test0_err := <-pc_test0.Errors()
if test0_err.Err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
}
pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
test1_msg := <-pc_test1.Messages()
if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
t.Error("Message was not as expected:", test1_msg)
}
pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
if err != nil {
t.Fatal(err)
}
other0_msg := <-pc_other0.Messages()
if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
t.Error("Message was not as expected:", other0_msg)
}
}
func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
consumer := NewConsumer(t, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Fatal(err)
}
select {
case <-pc.Messages():
t.Error("Did not epxect a message on the messages channel.")
case err := <-pc.Errors():
if err.Err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
}
}
errs := pc.Close().(sarama.ConsumerErrors)
if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers {
t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
}
}
func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
if err != errOutOfExpectations {
t.Error("Expected ConsumePartition to return errOutOfExpectations")
}
if err := consumer.Close(); err != nil {
t.Error("No error expected on close, but found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
if err := consumer.Close(); err != nil {
t.Error("No error expected on close, but found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
if err != nil {
t.Error("Did not expect error, found:", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
if err := consumer.Close(); err != nil {
t.Error(err)
}
}
func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
pcmock.ExpectMessagesDrainedOnClose()
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Error(err)
}
// consume first message, not second one
<-pc.Messages()
if err := consumer.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}
func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
pcmock.YieldError(sarama.ErrInvalidMessage)
pcmock.YieldError(sarama.ErrInvalidMessage)
pcmock.ExpectErrorsDrainedOnClose()
pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
if err != nil {
t.Error(err)
}
// consume first and second error,
<-pc.Errors()
<-pc.Errors()
if err := consumer.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 0 {
t.Errorf("Expected no expectation failures to be set on the error reporter.")
}
}
func TestConsumerTopicMetadata(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
consumer.SetTopicMetadata(map[string][]int32{
"test1": {0, 1, 2, 3},
"test2": {0, 1, 2, 3, 4, 5, 6, 7},
})
topics, err := consumer.Topics()
if err != nil {
t.Error(err)
}
sortedTopics := sort.StringSlice(topics)
sortedTopics.Sort()
if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
t.Error("Unexpected topics returned:", sortedTopics)
}
partitions1, err := consumer.Partitions("test1")
if err != nil {
t.Error(err)
}
if len(partitions1) != 4 {
t.Error("Unexpected partitions returned:", len(partitions1))
}
partitions2, err := consumer.Partitions("test2")
if err != nil {
t.Error(err)
}
if len(partitions2) != 8 {
t.Error("Unexpected partitions returned:", len(partitions2))
}
if len(trm.errors) != 0 {
t.Errorf("Expected no expectation failures to be set on the error reporter.")
}
}
func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
trm := newTestReporterMock()
consumer := NewConsumer(trm, nil)
if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
t.Error("Expected sarama.ErrOutOfBrokers, found", err)
}
if len(trm.errors) != 1 {
t.Errorf("Expected an expectation failure to be set on the error reporter.")
}
}


@ -1,48 +0,0 @@
/*
Package mocks provides mocks that can be used for testing applications
that use Sarama. The mock types provided by this package implement the
interfaces Sarama exports, so you can use them for dependency injection
in your tests.
All mock instances require you to set expectations on them before you
can use them. These expectations determine how the mock will behave. If an
expectation is not met, it will make your test fail.
NOTE: this package currently does not fall under the API stability
guarantee of Sarama as it is still considered experimental.
*/
package mocks
import (
"errors"
"github.com/Shopify/sarama"
)
// ErrorReporter is a simple interface that includes the testing.T methods we use to report
// expectation violations when using the mock objects.
type ErrorReporter interface {
Errorf(string, ...interface{})
}
// ValueChecker is a function type to be set in each expectation of the producer mocks
// to check the value passed.
type ValueChecker func(val []byte) error
var (
errProduceSuccess error = nil
errOutOfExpectations = errors.New("No more expectations set on mock")
errPartitionConsumerNotStarted = errors.New("The partition consumer was never started")
)
const AnyOffset int64 = -1000
type producerExpectation struct {
Result error
CheckFunction ValueChecker
}
type consumerExpectation struct {
Err error
Msg *sarama.ConsumerMessage
}
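The sync producer tests later in this diff pass a generateRegexpChecker helper as a ValueChecker; that helper is not part of this file, so the following is only a sketch of what such a checker looks like, written to produce the "No match" error prefix the tests assert on (regexp, fmt and errors imports assumed):

func generateRegexpChecker(re string) ValueChecker {
	return func(val []byte) error {
		matched, err := regexp.MatchString(re, string(val))
		if err != nil {
			return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
		}
		if !matched {
			return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
		}
		return nil
	}
}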


@ -1,146 +0,0 @@
package mocks
import (
"sync"
"github.com/Shopify/sarama"
)
// SyncProducer implements sarama's SyncProducer interface for testing purposes.
// Before you can use it, you have to set expectations on the mock SyncProducer
// to tell it how to handle calls to SendMessage, so you can easily test success
// and failure scenarios.
type SyncProducer struct {
l sync.Mutex
t ErrorReporter
expectations []*producerExpectation
lastOffset int64
}
// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
// be the *testing.T instance of your test method. An error will be written to it if
// an expectation is violated. The config argument is currently unused, but is
// maintained to be compatible with the async Producer.
func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
return &SyncProducer{
t: t,
expectations: make([]*producerExpectation, 0),
}
}
////////////////////////////////////////////////
// Implement SyncProducer interface
////////////////////////////////////////////////
// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessage, so it knows
// how to handle them. You can set a function in each expectation so that the message value
// is checked by this function and an error is returned if the check fails.
// If there are no remaining expectations when SendMessage is called,
// the mock producer will write an error to the test state object.
func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
sp.l.Lock()
defer sp.l.Unlock()
if len(sp.expectations) > 0 {
expectation := sp.expectations[0]
sp.expectations = sp.expectations[1:]
if expectation.CheckFunction != nil {
val, err := msg.Value.Encode()
if err != nil {
sp.t.Errorf("Input message encoding failed: %s", err.Error())
return -1, -1, err
}
errCheck := expectation.CheckFunction(val)
if errCheck != nil {
sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
return -1, -1, errCheck
}
}
if expectation.Result == errProduceSuccess {
sp.lastOffset++
msg.Offset = sp.lastOffset
return 0, msg.Offset, nil
}
return -1, -1, expectation.Result
}
sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
return -1, -1, errOutOfExpectations
}
// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
// You have to set expectations on the mock producer before calling SendMessages, so it knows
// how to handle them. If there are not enough remaining expectations when SendMessages is called,
// the mock producer will write an error to the test state object.
func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
sp.l.Lock()
defer sp.l.Unlock()
if len(sp.expectations) >= len(msgs) {
expectations := sp.expectations[0:len(msgs)]
sp.expectations = sp.expectations[len(msgs):]
for _, expectation := range expectations {
if expectation.Result != errProduceSuccess {
return expectation.Result
}
}
return nil
}
sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
return errOutOfExpectations
}
// Close corresponds with the Close method of sarama's SyncProducer implementation.
// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow,
// so it will write an error to the test state if there are any remaining expectations.
func (sp *SyncProducer) Close() error {
sp.l.Lock()
defer sp.l.Unlock()
if len(sp.expectations) > 0 {
sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
}
return nil
}
////////////////////////////////////////////////
// Setting expectations
////////////////////////////////////////////////
// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
// will be called. The mock producer will first call the given function to check the message value.
// It will cascade the error of the function, if any, or handle the message as if it produced
// successfully, i.e. by returning a valid partition and offset, and a nil error.
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
sp.l.Lock()
defer sp.l.Unlock()
sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
}
// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will first call the given function to check the message value.
// It will cascade the error of the function, if any, or handle the message as if it failed
// to produce successfully, i.e. by returning the provided error.
func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
sp.l.Lock()
defer sp.l.Unlock()
sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
}
// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it produced successfully, i.e. by
// returning a valid partition and offset, and a nil error.
func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
}
// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
// called. The mock producer will handle the message as if it failed to produce
// successfully, i.e. by returning the provided error.
func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
}


@ -1,124 +0,0 @@
package mocks
import (
"strings"
"testing"
"github.com/Shopify/sarama"
)
func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
var mp interface{} = &SyncProducer{}
if _, ok := mp.(sarama.SyncProducer); !ok {
t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
}
}
func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
sp := NewSyncProducer(t, nil)
defer func() {
if err := sp.Close(); err != nil {
t.Error(err)
}
}()
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
_, offset, err := sp.SendMessage(msg)
if err != nil {
t.Errorf("The first message should have been produced successfully, but got %s", err)
}
if offset != 1 || offset != msg.Offset {
t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
}
_, offset, err = sp.SendMessage(msg)
if err != nil {
t.Errorf("The second message should have been produced successfully, but got %s", err)
}
if offset != 2 || offset != msg.Offset {
t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
}
_, _, err = sp.SendMessage(msg)
if err != sarama.ErrOutOfBrokers {
t.Errorf("The third message should not have been produced successfully")
}
if err := sp.Close(); err != nil {
t.Error(err)
}
}
func TestSyncProducerWithTooManyExpectations(t *testing.T) {
trm := newTestReporterMock()
sp := NewSyncProducer(trm, nil)
sp.ExpectSendMessageAndSucceed()
sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err != nil {
t.Error("No error expected on first SendMessage call", err)
}
if err := sp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestSyncProducerWithTooFewExpectations(t *testing.T) {
trm := newTestReporterMock()
sp := NewSyncProducer(trm, nil)
sp.ExpectSendMessageAndSucceed()
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err != nil {
t.Error("No error expected on first SendMessage call", err)
}
if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
}
if err := sp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}
func TestSyncProducerWithCheckerFunction(t *testing.T) {
trm := newTestReporterMock()
sp := NewSyncProducer(trm, nil)
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err != nil {
t.Error("No error expected on first SendMessage call, found: ", err)
}
msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
t.Error("Error during value check expected on second SendMessage call, found:", err)
}
if err := sp.Close(); err != nil {
t.Error(err)
}
if len(trm.errors) != 1 {
t.Error("Expected to report an error")
}
}


@ -11,6 +11,7 @@ type packetDecoder interface {
getInt64() (int64, error)
getVarint() (int64, error)
getArrayLength() (int, error)
getBool() (bool, error)
// Collections
getBytes() ([]byte, error)
@ -25,6 +26,7 @@ type packetDecoder interface {
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
// Stacks, see PushDecoder
push(in pushDecoder) error


@ -13,6 +13,7 @@ type packetEncoder interface {
putInt64(in int64)
putVarint(in int64)
putArrayLength(in int) error
putBool(in bool)
// Collections
putBytes(in []byte) error


@ -44,6 +44,10 @@ func (pe *prepEncoder) putArrayLength(in int) error {
return nil
}
func (pe *prepEncoder) putBool(in bool) {
pe.length++
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {


@ -188,11 +188,6 @@ func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
return err
}
var records Records
if version >= 3 {
records = newDefaultRecords(nil)
} else {
records = newLegacyRecords(nil)
}
if err := records.decode(recordsDecoder); err != nil {
return err
}


@ -84,9 +84,9 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
size += len(key) + len(val)
if len(msg.Headers) > 0 {
rec.Headers = make([]*RecordHeader, len(msg.Headers))
for i, h := range msg.Headers {
rec.Headers[i] = &h
size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
for i := range msg.Headers {
rec.Headers[i] = &msg.Headers[i]
size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
}
}
set.recordsToSend.recordBatch.addRecord(rec)
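The rewritten loop above avoids a classic Go pitfall: under the per-loop variable semantics Go had at the time (changed only in Go 1.22), taking the address of the range variable h makes every rec.Headers[i] point at the same variable, so all headers end up aliasing the final element. A minimal illustration with hypothetical values:

headers := []RecordHeader{{Key: []byte("a")}, {Key: []byte("b")}}
ptrs := make([]*RecordHeader, len(headers))
for i, h := range headers {
	ptrs[i] = &h // every entry points at the single loop variable h
}
// After the loop, ptrs[0].Key and ptrs[1].Key both read "b".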


@ -1,6 +1,7 @@
package sarama
import (
"fmt"
"testing"
"time"
)
@ -196,6 +197,20 @@ func TestProduceSetV3RequestBuilding(t *testing.T) {
Partition: 0,
Key: StringEncoder(TestMessage),
Value: StringEncoder(TestMessage),
Headers: []RecordHeader{
RecordHeader{
Key: []byte("header-1"),
Value: []byte("value-1"),
},
RecordHeader{
Key: []byte("header-2"),
Value: []byte("value-2"),
},
RecordHeader{
Key: []byte("header-3"),
Value: []byte("value-3"),
},
},
Timestamp: now,
}
for i := 0; i < 10; i++ {
@ -218,5 +233,16 @@ func TestProduceSetV3RequestBuilding(t *testing.T) {
if rec.TimestampDelta != time.Duration(i)*time.Second {
t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta)
}
for j, h := range batch.Records[i].Headers {
exp := fmt.Sprintf("header-%d", j+1)
if string(h.Key) != exp {
t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key)
}
exp = fmt.Sprintf("value-%d", j+1)
if string(h.Value) != exp {
t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value)
}
}
}
}


@ -11,6 +11,7 @@ var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice lengt
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
var errVarintOverflow = PacketDecodingError{"varint overflow"}
var errInvalidBool = PacketDecodingError{"invalid bool"}
type realDecoder struct {
raw []byte
@ -90,6 +91,17 @@ func (rd *realDecoder) getArrayLength() (int, error) {
return tmp, nil
}
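// getBool decodes a single byte: 0 is false, 1 is true, and any other value
// is an encoding error.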
func (rd *realDecoder) getBool() (bool, error) {
b, err := rd.getInt8()
if err != nil || b == 0 {
return false, err
}
if b != 1 {
return false, errInvalidBool
}
return true, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
@ -116,25 +128,29 @@ func (rd *realDecoder) getVarintBytes() ([]byte, error) {
return rd.getRawBytes(int(tmp))
}
func (rd *realDecoder) getString() (string, error) {
tmp, err := rd.getInt16()
func (rd *realDecoder) getStringLength() (int, error) {
length, err := rd.getInt16()
if err != nil {
return "", err
return 0, err
}
n := int(tmp)
n := int(length)
switch {
case n < -1:
return "", errInvalidStringLength
case n == -1:
return "", nil
case n == 0:
return "", nil
return 0, errInvalidStringLength
case n > rd.remaining():
rd.off = len(rd.raw)
return "", ErrInsufficientData
return 0, ErrInsufficientData
}
return n, nil
}
func (rd *realDecoder) getString() (string, error) {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return "", err
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
@ -143,12 +159,14 @@ func (rd *realDecoder) getString() (string, error) {
}
func (rd *realDecoder) getNullableString() (*string, error) {
tmp, err := rd.getInt16()
if err != nil || tmp == -1 {
n, err := rd.getStringLength()
if err != nil || n == -1 {
return nil, err
}
str, err := rd.getString()
return &str, err
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return &tmpStr, err
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
@ -264,6 +282,14 @@ func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
return rd.raw[start:rd.off], nil
}
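// peek returns a decoder over length bytes starting at the given offset
// relative to the current position, without advancing it.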
func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
if rd.remaining() < offset+length {
return nil, ErrInsufficientData
}
off := rd.off + offset
return &realDecoder{raw: rd.raw[off : off+length]}, nil
}
// stacks
func (rd *realDecoder) push(in pushDecoder) error {


@ -44,6 +44,14 @@ func (re *realEncoder) putArrayLength(in int) error {
return nil
}
func (re *realEncoder) putBool(in bool) {
if in {
re.putInt8(1)
return
}
re.putInt8(0)
}
// collection
func (re *realEncoder) putRawBytes(in []byte) error {


@ -161,6 +161,11 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) {
bufSize := int(batchLen) - recordBatchOverhead
recBuffer, err := pd.getRawBytes(bufSize)
if err != nil {
if err == ErrInsufficientData {
b.PartialTrailingRecord = true
b.Records = nil
return nil
}
return err
}


@ -3,8 +3,12 @@ package sarama
import "fmt"
const (
legacyRecords = iota
unknownRecords = iota
legacyRecords
defaultRecords
magicOffset = 16
magicLength = 1
)
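// Both wire formats place the magic byte at absolute offset 16: a legacy
// message starts with offset (8 bytes), message size (4) and CRC (4), while
// a v2 record batch starts with baseOffset (8), batchLength (4) and
// partitionLeaderEpoch (4). Peeking magicLength bytes at magicOffset is
// therefore enough to tell the two formats apart.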
// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
@ -22,7 +26,30 @@ func newDefaultRecords(batch *RecordBatch) Records {
return Records{recordsType: defaultRecords, recordBatch: batch}
}
// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil.
// The first return value indicates whether both fields are nil (and the type is not set).
// If both fields are not nil, it returns an error.
func (r *Records) setTypeFromFields() (bool, error) {
if r.msgSet == nil && r.recordBatch == nil {
return true, nil
}
if r.msgSet != nil && r.recordBatch != nil {
return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
}
r.recordsType = defaultRecords
if r.msgSet != nil {
r.recordsType = legacyRecords
}
return false, nil
}
func (r *Records) encode(pe packetEncoder) error {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return err
}
}
switch r.recordsType {
case legacyRecords:
if r.msgSet == nil {
@ -38,7 +65,31 @@ func (r *Records) encode(pe packetEncoder) error {
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
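// setTypeFromMagic peeks at the magic byte to decide whether the buffer holds
// a legacy MessageSet (magic < 2) or a v2 RecordBatch.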
func (r *Records) setTypeFromMagic(pd packetDecoder) error {
dec, err := pd.peek(magicOffset, magicLength)
if err != nil {
return err
}
magic, err := dec.getInt8()
if err != nil {
return err
}
r.recordsType = defaultRecords
if magic < 2 {
r.recordsType = legacyRecords
}
return nil
}
func (r *Records) decode(pd packetDecoder) error {
if r.recordsType == unknownRecords {
if err := r.setTypeFromMagic(pd); err != nil {
return err
}
}
switch r.recordsType {
case legacyRecords:
r.msgSet = &MessageSet{}
@ -51,6 +102,12 @@ func (r *Records) decode(pd packetDecoder) error {
}
func (r *Records) numRecords() (int, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return 0, err
}
}
switch r.recordsType {
case legacyRecords:
if r.msgSet == nil {
@ -67,7 +124,15 @@ func (r *Records) numRecords() (int, error) {
}
func (r *Records) isPartial() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case unknownRecords:
return false, nil
case legacyRecords:
if r.msgSet == nil {
return false, nil
@ -83,6 +148,12 @@ func (r *Records) isPartial() (bool, error) {
}
func (r *Records) isControl() (bool, error) {
if r.recordsType == unknownRecords {
if empty, err := r.setTypeFromFields(); err != nil || empty {
return false, err
}
}
switch r.recordsType {
case legacyRecords:
return false, nil


@ -31,7 +31,7 @@ func TestLegacyRecords(t *testing.T) {
}
set = &MessageSet{}
r = newLegacyRecords(nil)
r = Records{}
err = decode(exp, set)
if err != nil {
@ -42,6 +42,9 @@ func TestLegacyRecords(t *testing.T) {
t.Fatal(err)
}
if r.recordsType != legacyRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords)
}
if !reflect.DeepEqual(set, r.msgSet) {
t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.msgSet)
}
@ -96,7 +99,7 @@ func TestDefaultRecords(t *testing.T) {
}
batch = &RecordBatch{}
r = newDefaultRecords(nil)
r = Records{}
err = decode(exp, batch)
if err != nil {
@ -107,6 +110,9 @@ func TestDefaultRecords(t *testing.T) {
t.Fatal(err)
}
if r.recordsType != defaultRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords)
}
if !reflect.DeepEqual(batch, r.recordBatch) {
t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.recordBatch)
}


@ -114,6 +114,8 @@ func allocateBody(key, version int16) protocolBody {
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
case 37:
return &CreatePartitionsRequest{}
}
return nil
}


@ -1,10 +0,0 @@
# Sarama tools
This folder contains applications that are useful for exploration of your Kafka cluster, or instrumentation.
Some of these tools mirror tools that ship with Kafka, but these tools won't require installing the JVM to function.
- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
To install all tools, run `go get github.com/Shopify/sarama/tools/...`


@ -1,2 +0,0 @@
kafka-console-consumer
kafka-console-consumer.test


@ -1,29 +0,0 @@
# kafka-console-consumer
A simple command line tool to consume partitions of a topic and print the
messages on the standard output.
### Installation
go get github.com/Shopify/sarama/tools/kafka-console-consumer
### Usage
# Minimum invocation
kafka-console-consumer -topic=test -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-consumer -topic=test
# You can specify the offset you want to start at. It can be either
# `oldest` or `newest`. The default is `newest`.
kafka-console-consumer -topic=test -offset=oldest
kafka-console-consumer -topic=test -offset=newest
# You can specify the partition(s) you want to consume as a comma-separated
# list. The default is `all`.
kafka-console-consumer -topic=test -partitions=1,2,3
# Display all command line options
kafka-console-consumer -help


@ -1,145 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *verbose {
sarama.Logger = logger
}
var initialOffset int64
switch *offset {
case "oldest":
initialOffset = sarama.OffsetOldest
case "newest":
initialOffset = sarama.OffsetNewest
default:
printUsageErrorAndExit("-offset should be `oldest` or `newest`")
}
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
if err != nil {
printErrorAndExit(69, "Failed to start consumer: %s", err)
}
partitionList, err := getPartitions(c)
if err != nil {
printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
}
var (
messages = make(chan *sarama.ConsumerMessage, *bufferSize)
closing = make(chan struct{})
wg sync.WaitGroup
)
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Kill, os.Interrupt)
<-signals
logger.Println("Initiating shutdown of consumer...")
close(closing)
}()
for _, partition := range partitionList {
pc, err := c.ConsumePartition(*topic, partition, initialOffset)
if err != nil {
printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
}
go func(pc sarama.PartitionConsumer) {
<-closing
pc.AsyncClose()
}(pc)
wg.Add(1)
go func(pc sarama.PartitionConsumer) {
defer wg.Done()
for message := range pc.Messages() {
messages <- message
}
}(pc)
}
go func() {
for msg := range messages {
fmt.Printf("Partition:\t%d\n", msg.Partition)
fmt.Printf("Offset:\t%d\n", msg.Offset)
fmt.Printf("Key:\t%s\n", string(msg.Key))
fmt.Printf("Value:\t%s\n", string(msg.Value))
fmt.Println()
}
}()
wg.Wait()
logger.Println("Done consuming topic", *topic)
close(messages)
if err := c.Close(); err != nil {
logger.Println("Failed to close consumer: ", err)
}
}
func getPartitions(c sarama.Consumer) ([]int32, error) {
if *partitions == "all" {
return c.Partitions(*topic)
}
tmp := strings.Split(*partitions, ",")
var pList []int32
for i := range tmp {
val, err := strconv.ParseInt(tmp[i], 10, 32)
if err != nil {
return nil, err
}
pList = append(pList, int32(val))
}
return pList, nil
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}


@ -1,2 +0,0 @@
kafka-console-partitionconsumer
kafka-console-partitionconsumer.test


@ -1,28 +0,0 @@
# kafka-console-partitionconsumer
NOTE: this tool is deprecated in favour of the more general and more powerful
`kafka-console-consumer`.
A simple command line tool to consume a partition of a topic and print the messages
on the standard output.
### Installation
go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer
### Usage
# Minimum invocation
kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-partitionconsumer -topic=test -partition=4
# You can specify the offset you want to start at. It can be either
# `oldest`, `newest`, or a specific offset number
kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
# Display all command line options
kafka-console-partitionconsumer -help


@ -1,102 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"os/signal"
"strconv"
"strings"
"github.com/Shopify/sarama"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
topic = flag.String("topic", "", "REQUIRED: the topic to consume")
partition = flag.Int("partition", -1, "REQUIRED: the partition to consume")
offset = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
verbose = flag.Bool("verbose", false, "Whether to turn on sarama logging")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
}
if *topic == "" {
printUsageErrorAndExit("-topic is required")
}
if *partition == -1 {
printUsageErrorAndExit("-partition is required")
}
if *verbose {
sarama.Logger = logger
}
var (
initialOffset int64
offsetError error
)
switch *offset {
case "oldest":
initialOffset = sarama.OffsetOldest
case "newest":
initialOffset = sarama.OffsetNewest
default:
initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
}
if offsetError != nil {
printUsageErrorAndExit("Invalid initial offset: %s", *offset)
}
c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
if err != nil {
printErrorAndExit(69, "Failed to start consumer: %s", err)
}
pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
if err != nil {
printErrorAndExit(69, "Failed to start partition consumer: %s", err)
}
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Kill, os.Interrupt)
<-signals
pc.AsyncClose()
}()
for msg := range pc.Messages() {
fmt.Printf("Offset:\t%d\n", msg.Offset)
fmt.Printf("Key:\t%s\n", string(msg.Key))
fmt.Printf("Value:\t%s\n", string(msg.Value))
fmt.Println()
}
if err := c.Close(); err != nil {
logger.Println("Failed to close consumer: ", err)
}
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}


@ -1,2 +0,0 @@
kafka-console-producer
kafka-console-producer.test


@ -1,34 +0,0 @@
# kafka-console-producer
A simple command line tool to produce a single message to Kafka.
### Installation
go get github.com/Shopify/sarama/tools/kafka-console-producer
### Usage
# Minimum invocation
kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
# It will pick up a KAFKA_PEERS environment variable
export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
kafka-console-producer -topic=test -value=value
# It will read the value from stdin by using pipes
echo "hello world" | kafka-console-producer -topic=test
# Specify a key:
echo "hello world" | kafka-console-producer -topic=test -key=key
# Partitioning: by default, kafka-console-producer will partition as follows:
# - manual partitioning if a -partition is provided
# - hash partitioning by key if a -key is provided
# - random partitioning otherwise.
#
# You can override this using the -partitioner argument:
echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
# Display all command line options
kafka-console-producer -help


@ -1,124 +0,0 @@
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"github.com/Shopify/sarama"
"github.com/rcrowley/go-metrics"
)
var (
brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
topic = flag.String("topic", "", "REQUIRED: the topic to produce to")
key = flag.String("key", "", "The key of the message to produce. Can be empty.")
value = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
partition = flag.Int("partition", -1, "The partition to produce to.")
verbose = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr")
silent = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")
logger = log.New(os.Stderr, "", log.LstdFlags)
)
func main() {
flag.Parse()
if *brokerList == "" {
printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
}
if *topic == "" {
printUsageErrorAndExit("no -topic specified")
}
if *verbose {
sarama.Logger = logger
}
config := sarama.NewConfig()
config.Producer.RequiredAcks = sarama.WaitForAll
config.Producer.Return.Successes = true
switch *partitioner {
case "":
if *partition >= 0 {
config.Producer.Partitioner = sarama.NewManualPartitioner
} else {
config.Producer.Partitioner = sarama.NewHashPartitioner
}
case "hash":
config.Producer.Partitioner = sarama.NewHashPartitioner
case "random":
config.Producer.Partitioner = sarama.NewRandomPartitioner
case "manual":
config.Producer.Partitioner = sarama.NewManualPartitioner
if *partition == -1 {
printUsageErrorAndExit("-partition is required when partitioning manually")
}
default:
printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
}
message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
if *key != "" {
message.Key = sarama.StringEncoder(*key)
}
if *value != "" {
message.Value = sarama.StringEncoder(*value)
} else if stdinAvailable() {
bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
}
message.Value = sarama.ByteEncoder(bytes)
} else {
printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
}
producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
if err != nil {
printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
}
defer func() {
if err := producer.Close(); err != nil {
logger.Println("Failed to close Kafka producer cleanly:", err)
}
}()
partition, offset, err := producer.SendMessage(message)
if err != nil {
printErrorAndExit(69, "Failed to produce message: %s", err)
} else if !*silent {
fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
}
if *showMetrics {
metrics.WriteOnce(config.MetricRegistry, os.Stderr)
}
}
func printErrorAndExit(code int, format string, values ...interface{}) {
fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
fmt.Fprintln(os.Stderr)
os.Exit(code)
}
func printUsageErrorAndExit(message string) {
fmt.Fprintln(os.Stderr, "ERROR:", message)
fmt.Fprintln(os.Stderr)
fmt.Fprintln(os.Stderr, "Available command line options:")
flag.PrintDefaults()
os.Exit(64)
}
func stdinAvailable() bool {
stat, _ := os.Stdin.Stat()
return (stat.Mode() & os.ModeCharDevice) == 0
}


@ -2,7 +2,9 @@ package sarama
import (
"bufio"
"fmt"
"net"
"regexp"
)
type none struct{}
@ -147,5 +149,36 @@ var (
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
minVersion = V0_8_2_0
)
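// ParseKafkaVersion parses a Kafka version string of the form 0.x.y.z
// (for 0.x releases) or x.y.z (1.0 and later) into a KafkaVersion.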
func ParseKafkaVersion(s string) (KafkaVersion, error) {
var major, minor, veryMinor, patch uint
var err error
if len(s) == 0 {
return minVersion, fmt.Errorf("invalid version `%s`", s)
}
if s[0] == '0' {
err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
} else {
err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
}
if err != nil {
return minVersion, err
}
return newKafkaVersion(major, minor, veryMinor, patch), nil
}
func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
if !regexp.MustCompile(pattern).MatchString(s) {
return fmt.Errorf("invalid version `%s`", s)
}
_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
return err
}
func (v KafkaVersion) String() string {
if v.version[0] == 0 {
return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
} else {
return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
}
}


@ -19,3 +19,23 @@ func TestVersionCompare(t *testing.T) {
t.Error("0.8.2.1 >= 0.10.0.0")
}
}
func TestVersionParsing(t *testing.T) {
validVersions := []string{"0.8.2.0", "0.8.2.1", "0.9.0.0", "0.10.2.0", "1.0.0"}
for _, s := range validVersions {
v, err := ParseKafkaVersion(s)
if err != nil {
t.Errorf("could not parse valid version %s: %s", s, err)
}
if v.String() != s {
t.Errorf("version %s != %s", v.String(), s)
}
}
invalidVersions := []string{"0.8.2-4", "0.8.20", "1.19.0.0", "1.0.x"}
for _, s := range invalidVersions {
if _, err := ParseKafkaVersion(s); err == nil {
t.Errorf("invalid version %s parsed without error", s)
}
}
}


@ -1,22 +0,0 @@
#!/bin/sh
set -ex
# Launch and wait for toxiproxy
${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
while ! nc -q 1 localhost 2181 </dev/null; do echo "Waiting"; sleep 1; done
while ! nc -q 1 localhost 9092 </dev/null; do echo "Waiting"; sleep 1; done
# Launch and wait for Zookeeper
for i in 1 2 3 4 5; do
KAFKA_PORT=`expr $i + 9090`
cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
done
while ! nc -q 1 localhost 21805 </dev/null; do echo "Waiting"; sleep 1; done
# Launch and wait for Kafka
for i in 1 2 3 4 5; do
KAFKA_PORT=`expr $i + 9090`
cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-start.sh -daemon config/server.properties
done
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done


@ -1,8 +0,0 @@
#!/bin/sh
set -ex
cd ${KAFKA_INSTALL_ROOT}/kafka-9092
bin/kafka-topics.sh --create --partitions 1 --replication-factor 3 --topic test.1 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 4 --replication-factor 3 --topic test.4 --zookeeper localhost:2181
bin/kafka-topics.sh --create --partitions 64 --replication-factor 3 --topic test.64 --zookeeper localhost:2181


@ -1,49 +0,0 @@
#!/bin/sh
set -ex
TOXIPROXY_VERSION=2.0.0
mkdir -p ${KAFKA_INSTALL_ROOT}
if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
fi
if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
chmod +x ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
fi
rm -f ${KAFKA_INSTALL_ROOT}/toxiproxy
ln -s ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ${KAFKA_INSTALL_ROOT}/toxiproxy
for i in 1 2 3 4 5; do
ZK_PORT=`expr $i + 2180`
ZK_PORT_REAL=`expr $i + 21800`
KAFKA_PORT=`expr $i + 9090`
KAFKA_PORT_REAL=`expr $i + 29090`
# unpack kafka
mkdir -p ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}
tar xzf ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -C ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} --strip-components 1
# broker configuration
cp ${REPOSITORY_ROOT}/vagrant/server.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/KAFKAPORT/${KAFKA_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/KAFKA_HOSTNAME/${KAFKA_HOSTNAME}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
sed -i s/ZK_PORT/${ZK_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
KAFKA_DATADIR="${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data"
mkdir -p ${KAFKA_DATADIR}
sed -i s#KAFKA_DATADIR#${KAFKA_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
# zookeeper configuration
cp ${REPOSITORY_ROOT}/vagrant/zookeeper.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
sed -i s/ZK_PORT/${ZK_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
ZK_DATADIR="${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}"
mkdir -p ${ZK_DATADIR}
sed -i s#ZK_DATADIR#${ZK_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
echo $i > ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid
done


@ -1,9 +0,0 @@
start on started zookeeper-ZK_PORT
stop on stopping zookeeper-ZK_PORT
# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper)
script
sleep 2
export KAFKA_HEAP_OPTS="-Xmx320m"
exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
end script


@ -1,15 +0,0 @@
#!/bin/sh
set -ex
apt-get update
yes | apt-get install default-jre
export KAFKA_INSTALL_ROOT=/opt
export KAFKA_HOSTNAME=192.168.100.67
export KAFKA_VERSION=0.9.0.1
export REPOSITORY_ROOT=/vagrant
sh /vagrant/vagrant/install_cluster.sh
sh /vagrant/vagrant/setup_services.sh
sh /vagrant/vagrant/create_topics.sh


@ -1,22 +0,0 @@
#!/bin/sh
set -ex
${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
PID=$!
while ! nc -q 1 localhost 8474 </dev/null; do echo "Waiting"; sleep 1; done
wget -O/dev/null -S --post-data='{"name":"zk1", "upstream":"localhost:21801", "listen":"0.0.0.0:2181"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk2", "upstream":"localhost:21802", "listen":"0.0.0.0:2182"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk3", "upstream":"localhost:21803", "listen":"0.0.0.0:2183"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk4", "upstream":"localhost:21804", "listen":"0.0.0.0:2184"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"zk5", "upstream":"localhost:21805", "listen":"0.0.0.0:2185"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka1", "upstream":"localhost:29091", "listen":"0.0.0.0:9091"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka2", "upstream":"localhost:29092", "listen":"0.0.0.0:9092"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka3", "upstream":"localhost:29093", "listen":"0.0.0.0:9093"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka4", "upstream":"localhost:29094", "listen":"0.0.0.0:9094"}' localhost:8474/proxies
wget -O/dev/null -S --post-data='{"name":"kafka5", "upstream":"localhost:29095", "listen":"0.0.0.0:9095"}' localhost:8474/proxies
wait $PID


@ -1,127 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=KAFKAID
reserved.broker.max.id=10000
############################# Socket Server Settings #############################
# The port the socket server listens on
port=KAFKAPORT
# Hostname the broker will bind to. If not set, the server will bind to all interfaces
host.name=localhost
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=KAFKA_HOSTNAME
advertised.port=KAFKAID
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
# advertised.port=<port accessible by clients>
# The number of threads handling network requests
num.network.threads=2
# The number of threads doing disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=1048576
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=1048576
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=KAFKA_DATADIR
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=2
# Create new topics with a replication factor of 2 so failover can be tested
# more easily.
default.replication.factor=2
auto.create.topics.enable=false
delete.topic.enable=true
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=268435456
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=268435456
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=60000
# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=localhost:ZK_PORT
# Timeout in ms for connecting to zookeeper
zookeeper.session.timeout.ms=3000
zookeeper.connection.timeout.ms=3000
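KAFKAID, KAFKAPORT, KAFKA_HOSTNAME, KAFKA_DATADIR and ZK_PORT are placeholders that the Vagrant provisioning scripts substitute per broker. A sketch of one such rendering for broker 1 (the concrete paths are assumptions; the port values follow the scheme visible in setup_services.sh below):

#!/bin/sh
# Hypothetical rendering of the template above for broker 1 (ports 9091/2181).
sed -e 's/KAFKAID/9091/g' \
    -e 's/KAFKAPORT/9091/g' \
    -e 's/KAFKA_HOSTNAME/localhost/g' \
    -e 's|KAFKA_DATADIR|/opt/kafka-9091/data|g' \
    -e 's/ZK_PORT/2181/g' \
    server.properties > /opt/kafka-9091/config/server.properties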

View file

@@ -1,29 +0,0 @@
#!/bin/sh
set -ex
stop toxiproxy || true
cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
start toxiproxy
for i in 1 2 3 4 5; do
ZK_PORT=`expr $i + 2180`
KAFKA_PORT=`expr $i + 9090`
stop zookeeper-${ZK_PORT} || true
# set up zk service
cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf
# set up kafka service
cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
start zookeeper-${ZK_PORT}
done
# Wait for the last kafka node to finish booting
while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done
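The loop derives ZK_PORT = 2180 + i and KAFKA_PORT = 9090 + i, matching the proxy table in run_toxiproxy.sh above, but the final wait only checks the last broker's direct port (29095). A sketch (not in the commit) that probes every proxied endpoint instead:

#!/bin/sh
# Verify that each toxiproxy listener from run_toxiproxy.sh accepts connections.
for port in 2181 2182 2183 2184 2185 9091 9092 9093 9094 9095; do
    nc -z localhost $port && echo "port $port up" || echo "port $port DOWN"
done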

View file

@@ -1,6 +0,0 @@
start on started networking
stop on shutdown
env KAFKA_INSTALL_ROOT=/opt
exec /opt/run_toxiproxy.sh

View file

@@ -1,7 +0,0 @@
start on started toxiproxy
stop on stopping toxiproxy
script
export KAFKA_HEAP_OPTS="-Xmx192m"
exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties
end script

View file

@@ -1,36 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=ZK_DATADIR
# the port at which the clients will connect
clientPort=ZK_PORT
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
server.1=localhost:2281:2381
server.2=localhost:2282:2382
server.3=localhost:2283:2383
server.4=localhost:2284:2384
server.5=localhost:2285:2385
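One detail the template cannot express: ZooKeeper also requires a myid file inside dataDir identifying which server.N entry the local node is, so the provisioning step has to create one per instance. A sketch for server.1, reusing the ZK_DATADIR placeholder from above:

# Each ensemble member writes its own id; shown here for server.1 only.
echo 1 > ${ZK_DATADIR}/myid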

View file

@@ -1,5 +1,295 @@
Apache Thrift Changelog
Thrift 0.11.0
--------------------------------------------------------------------------------
## Sub-task
* [THRIFT-2733] - Erlang coding standards
* [THRIFT-2740] - Perl coding standards
* [THRIFT-3610] - Streamline exception handling in Python server handler
* [THRIFT-3686] - Java processor should report internal error on uncaught exception
* [THRIFT-4049] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types
* [THRIFT-4053] - Skip() should throw TProtocolException.INVALID_DATA on unknown data types
* [THRIFT-4136] - Align is_binary() method with is_string() to simplify those checks
* [THRIFT-4137] - Fix remaining undefined behavior invalid vptr casts in Thrift Compiler
* [THRIFT-4138] - Fix remaining undefined behavior invalid vptr casts in C++ library
* [THRIFT-4296] - Fix Ubuntu Xenial build environment for the python language
* [THRIFT-4298] - Fix Ubuntu Xenial build environment for the go 1.6 language
* [THRIFT-4299] - Fix Ubuntu Xenial build environment for the D language
* [THRIFT-4300] - Fix make cross in Ubuntu Xenial docker environment, once all language support issues are fixed
* [THRIFT-4302] - Fix Ubuntu Xenial make cross testing for lua and php7
* [THRIFT-4398] - Update EXTRA_DIST for "make dist"
## Bug
* [THRIFT-381] - Fail fast if configure detects C++ problems
* [THRIFT-1677] - MinGW support broken
* [THRIFT-1805] - Thrift should not swallow ALL exceptions
* [THRIFT-2026] - Fix TCompactProtocol 64 bit builds
* [THRIFT-2642] - Recursive structs don't work in python
* [THRIFT-2889] - stable release 0.9.2, erlang tutorial broken
* [THRIFT-2913] - Ruby Server Thrift::ThreadPoolServer should serve inside a thread
* [THRIFT-2998] - Node.js: Missing header from http request
* [THRIFT-3000] - .NET implementation has trouble with mixed IP modes
* [THRIFT-3281] - Travis CI build passed but the log says BUILD FAILED
* [THRIFT-3358] - Makefile:1362: *** missing separator. Stop.
* [THRIFT-3600] - Make TTwisted server send exception on unexpected handler error
* [THRIFT-3602] - Make Tornado server send exception on unexpected handler error
* [THRIFT-3657] - D TFileWriterTransport close should use non-priority send
* [THRIFT-3700] - Go Map has wrong default value when optional
* [THRIFT-3703] - Unions Field Count Does Not Consider Map/Set/List Fields
* [THRIFT-3730] - server log error twice
* [THRIFT-3778] - go client can not pass method parameter to server of other language if no field_id is given
* [THRIFT-3784] - thrift-maven-plugin generates invalid include directories for IDL in dependency JARs
* [THRIFT-3801] - Node Thrift client throws exception with multiplexer and responses that are bigger than a single buffer
* [THRIFT-3821] - TMemoryBuffer buffer may overflow when resizing
* [THRIFT-3832] - Thrift version 0.9.3 example on Windows, Visual Studio, linking errors during compiling
* [THRIFT-3847] - thrift/config.h includes a #define for VERSION which will likely conflict with existing user environment or code
* [THRIFT-3873] - Fix various build warnings when using Visual Studio
* [THRIFT-3891] - TNonblockingServer configured with more than one IO threads does not always return from serve() upon stop()
* [THRIFT-3892] - Thrift uses TLS SNI extension provided by OpenSSL library. Older version of OpenSSL(< 0.9.8f) may create problem because they do not support 'SSL_set_tlsext_host_name()'.
* [THRIFT-3895] - Build fails using Java 1.8 with Ant < 1.9
* [THRIFT-3896] - map<string,string> data with number string key cannot access that deserialized by php extension
* [THRIFT-3938] - Python TNonblockingServer does not work with SSL
* [THRIFT-3944] - TSSLSocket has dead code in checkHandshake
* [THRIFT-3946] - Java 1.5 compatibility broken for binary fields (java5 option)
* [THRIFT-3960] - Inherited services in Lua generator are not named correctly
* [THRIFT-3962] - Ant build.xml broken on Windows for Java library
* [THRIFT-3963] - Thrift.cabal filename does not match module name
* [THRIFT-3967] - gobject/gparam.h:166:33: warning: enumerator value for G_PARAM_DEPRECATED is not an integer constant expression
* [THRIFT-3968] - Deserializing empty string/binary fields
* [THRIFT-3974] - Using clang-3.8 and ThreadSanitizer on the concurrency_test claims bad PThread behavior
* [THRIFT-3984] - PHP7 extension causes segfault
* [THRIFT-4008] - broken ci due to upstream dependency versioning break
* [THRIFT-4009] - Use @implementer instead of implements in TTwisted.py
* [THRIFT-4010] - Q.fcall messing up with *this* pointer inside called function
* [THRIFT-4011] - Sets of Thrift structs generate Go code that can't be serialized to JSON
* [THRIFT-4012] - Python Twisted implementation uses implements, not compatible with Py3
* [THRIFT-4014] - align C# meta data in AssemblyInfo.cs
* [THRIFT-4015] - Fix wrongly spelled "Thirft"s
* [THRIFT-4016] - testInsanity() impl does not conform to test spec in ThriftTest.thrift
* [THRIFT-4023] - Skip unexpected field types on read/write
* [THRIFT-4024] - Skip() should throw on unknown data types
* [THRIFT-4026] - TSSLSocket doesn't work with Python < 2.7.9
* [THRIFT-4029] - Accelerated protocols do not build from thrift-py 0.10.0 on PyPI
* [THRIFT-4031] - Go plugin generates invalid code for lists of typedef'ed built-in types
* [THRIFT-4033] - Default build WITH_PLUGIN=ON for all builds results in packaging errors
* [THRIFT-4034] - CMake doesn't work to build compiler on MacOS
* [THRIFT-4036] - Add .NET Core environment/build support to the docker image
* [THRIFT-4038] - socket check: checking an unsigned number against >= 0 never fails
* [THRIFT-4042] - ExtractionError when using accelerated thrift in a multiprocess test
* [THRIFT-4043] - thrift perl debian package is placing files in the wrong place
* [THRIFT-4044] - Build job 17 failing on every pull request; hspec core (haskell) 2.4 issue
* [THRIFT-4046] - MinGW with gcc 6.2 does not compile on Windows
* [THRIFT-4060] - Thrift printTo ostream overload mechanism breaks down when types are nested
* [THRIFT-4062] - Remove debug print from TServiceClient
* [THRIFT-4065] - Document Perl ForkingServer signal restriction imposed by THRIFT-3848 and remove unnecessary code
* [THRIFT-4068] - A code comment in Java ServerSocket is wrong around accept()
* [THRIFT-4073] - enum files are still being generated with unused imports
* [THRIFT-4076] - Appveyor builds failing because ant 1.9.8 was removed from apache servers
* [THRIFT-4077] - AI_ADDRCONFIG redefined after recent change to PlatformSocket header
* [THRIFT-4079] - Generated perl code that returns structures from included thrift files is missing a necessary use clause
* [THRIFT-4087] - Spurious exception destroying TThreadedServer because of incorrect join() call
* [THRIFT-4102] - TBufferedTransport performance issue since 0.10.0
* [THRIFT-4106] - concurrency_test fails randomly
* [THRIFT-4108] - c_glib thrift ssl has multiple bugs and deprecated functions
* [THRIFT-4109] - Configure Script uses string comparison for versions
* [THRIFT-4129] - C++ TNonblockingServer fd leak when failing to dispatch new connections
* [THRIFT-4131] - Javascript with WebSocket handles oneway methods wrong
* [THRIFT-4134] - Fix remaining undefined behavior invalid vptr casts
* [THRIFT-4140] - Use of non-thread-safe function gmtime()
* [THRIFT-4141] - Installation of haxe in docker files refers to a redirect link and fails
* [THRIFT-4147] - Rust: protocol should accept transports with non-static lifetime
* [THRIFT-4148] - [maven-thrift-plugin] compile error while import a thrift in dependency jar file.
* [THRIFT-4149] - System.out pollutes log files
* [THRIFT-4154] - PHP close() of a TSocket needs to close any type of socket
* [THRIFT-4158] - minor issue in README-MSYS2.md
* [THRIFT-4159] - Building tests fails on MSYS2 (MinGW64) due to a (small?) linker error
* [THRIFT-4160] - TNonblocking server fix use of closed/freed connections
* [THRIFT-4161] - TNonBlocking server using uninitialized event in error paths
* [THRIFT-4162] - TNonBlocking handling of TSockets in error state is incorrect after fd is closed
* [THRIFT-4164] - Core in TSSLSocket cleanupOpenSSL when destroying a mutex used by openssl
* [THRIFT-4165] - C++ build has many warnings under c++03 due to recent changes, cmake needs better platform-independent language level control
* [THRIFT-4166] - Recent fix to remove boost::lexical_cast usage broke VS2010
* [THRIFT-4167] - Missing compile flag
* [THRIFT-4170] - Support lua 5.1 or earlier properly for object length determination
* [THRIFT-4172] - node.js tutorial client does not import assert, connection issues are not handled properly
* [THRIFT-4177] - Java compiler produces deep copy constructor that could make shallow copy instead
* [THRIFT-4184] - Building on Appveyor: invalid escape sequence \L
* [THRIFT-4185] - fb303 counter encoding fix
* [THRIFT-4189] - Framed/buffered transport Dispose() does not dispose the nested transport
* [THRIFT-4193] - Lower the default maxReadBufferBytes for non-blocking servers
* [THRIFT-4195] - Compilation to GO produces broken code
* [THRIFT-4196] - Cannot generate recursive Rust types
* [THRIFT-4204] - typo in compact spec
* [THRIFT-4206] - Strings in container fields are not decoded properly with py:dynamic and py:utf8strings
* [THRIFT-4208] - C# NamedPipesServer not really working in some scenarios
* [THRIFT-4211] - Fix GError glib management under Thrift
* [THRIFT-4212] - c_glib flush tries to close SSL even if socket is invalid
* [THRIFT-4213] - Travis build fails at curl -sSL https://www.npmjs.com/install.sh | sh
* [THRIFT-4215] - Golang TTransportFactory Pattern Squelches Errors
* [THRIFT-4216] - Golang Http Clients Do Not Respect User Options
* [THRIFT-4218] - Set TCP_NODELAY for PHP client socket
* [THRIFT-4219] - Golang HTTP clients created with Nil buffer
* [THRIFT-4231] - TJSONProtocol throws unexpected non-Thrift-exception on null strings
* [THRIFT-4232] - ./configure does bad ant version check
* [THRIFT-4234] - Travis build fails cross language tests with "Unsupported security protocol type"
* [THRIFT-4237] - Go TServerSocket Race Conditions
* [THRIFT-4240] - Go TSimpleServer does not close properly
* [THRIFT-4243] - Go TSimpleServer race on wait in Stop() method
* [THRIFT-4245] - Golang TFramedTransport's writeBuffer increases if writes to transport failed
* [THRIFT-4246] - Sequence number mismatch on multiplexed clients
* [THRIFT-4247] - Compile fails with openssl 1.1
* [THRIFT-4248] - Compile fails - strncpy, memcmp, memset not declared in src/thrift/transport/TSSLSocket.cpp
* [THRIFT-4251] - Java Epoll Selector Bug
* [THRIFT-4257] - Typescript async callbacks do not provide the correct types
* [THRIFT-4258] - Boost/std thread wrapping faultiness
* [THRIFT-4260] - Go context generation issue. Context is parameter in Interface not in implementation
* [THRIFT-4261] - Go context generation issue: breaking change in generated code regarding thrift.TProcessorFunction interface
* [THRIFT-4262] - Invalid binding to InterlockedCompareExchange64() with 64-bit targets
* [THRIFT-4263] - Fix use after free bug for thrown exceptions
* [THRIFT-4266] - Erlang library throws during skipping fields of composite type (maps, lists, structs, sets)
* [THRIFT-4268] - Erlang library emits debugging output in transport layer
* [THRIFT-4273] - erlang:now/0: Deprecated BIF.
* [THRIFT-4274] - Python feature tests for SSL/TLS failing
* [THRIFT-4279] - Wrong path in include directive in generated Thrift sources
* [THRIFT-4283] - TNamedPipeServer race condition in interrupt
* [THRIFT-4284] - File contains a NBSP: lib/nodejs/lib/thrift/web_server.js
* [THRIFT-4290] - C# nullable option generates invalid code for non-required enum field with default value
* [THRIFT-4292] - TimerManager::remove() is not implemented
* [THRIFT-4307] - Make ssl-open timeout effective in golang client
* [THRIFT-4312] - Erlang client cannot connect to Python server: exception error: econnrefused
* [THRIFT-4313] - Program code of the Erlang tutorial files contain syntax errors
* [THRIFT-4316] - TByteBuffer.java will read too much data if a previous read returns fewer bytes than requested
* [THRIFT-4319] - command line switch for "evhttp" incorrectly resolved to anon pipes
* [THRIFT-4323] - range check errors or NPE in edge cases
* [THRIFT-4324] - field names can conflict with local vars in generated code
* [THRIFT-4328] - Travis CI builds are timing out (job 1) and haxe builds are failing since 9/11
* [THRIFT-4329] - c_glib Doesn't have a multiplexed processor
* [THRIFT-4331] - C++: TSSLSockets bug in handling huge messages, bug in handling polling
* [THRIFT-4332] - Binary protocol has memory leaks
* [THRIFT-4334] - Perl indentation incorrect when defaulting field attribute to a struct
* [THRIFT-4339] - Thrift Framed Transport in Erlang crashes server when client disconnects
* [THRIFT-4340] - Erlang fix a crash on client close
* [THRIFT-4355] - Javascript indentation incorrect when defaulting field attribute to a struct
* [THRIFT-4356] - thrift_protocol call Transport cause Segmentation fault
* [THRIFT-4359] - Haxe compiler looks like it is producing incorrect code for map or set key that is binary type
* [THRIFT-4362] - Missing size-check can lead to huge memory allocation
* [THRIFT-4364] - Website contributing guide erroneously recommends submitting patches in JIRA
* [THRIFT-4365] - Perl generated code uses indirect object syntax, which occasionally causes compilation errors.
* [THRIFT-4367] - python TProcessor.process is missing "self"
* [THRIFT-4370] - Ubuntu Artful cppcheck and flake8 are more stringent and causing SCA build job failures
* [THRIFT-4372] - Pipe write operations across a network are limited to 65,535 bytes per write.
* [THRIFT-4374] - cannot load thrift_protocol due to undefined symbol: _ZTVN10__cxxabiv120__si_class_type_infoE
* [THRIFT-4376] - Coverity high impact issue resolution
* [THRIFT-4377] - haxe. socket handles leak in TSimpleServer
* [THRIFT-4381] - Wrong isset bitfield value after transmission
* [THRIFT-4385] - Go remote client -u flag is broken
* [THRIFT-4392] - compiler/..../plugin.thrift structs mis-ordered blows up ocaml generator
* [THRIFT-4395] - Unable to build in the ubuntu-xenial docker image: clap 2.28 requires Rust 1.20
* [THRIFT-4396] - inconsistent (or plain wrong) version numbers in master/trunk
## Documentation
* [THRIFT-4157] - outdated readme about Haxe installation on Linux
## Improvement
* [THRIFT-105] - make a thrift_spec for a structures with negative tags
* [THRIFT-281] - Cocoa library code needs comments, badly
* [THRIFT-775] - performance improvements for Perl
* [THRIFT-2221] - Generate c++ code with std::shared_ptr instead of boost::shared_ptr.
* [THRIFT-2364] - OCaml: Use Oasis exclusively for build process
* [THRIFT-2504] - TMultiplexedProcessor should allow registering default processor called if no service name is present
* [THRIFT-3207] - Enable build with OpenSSL 1.1.0 series
* [THRIFT-3272] - Perl SSL Authentication Support
* [THRIFT-3357] - Generate EnumSet/EnumMap where elements/keys are enums
* [THRIFT-3369] - Implement SSL/TLS support on C with c_glib
* [THRIFT-3467] - Go Maps for Thrift Sets Should Have Values of Type struct{}
* [THRIFT-3580] - THeader for Haskell
* [THRIFT-3627] - Missing basic code style consistency of JavaScript.
* [THRIFT-3706] - There's no support for Multiplexed protocol on c_glib library
* [THRIFT-3766] - Add getUnderlyingTransport() to TZlibTransport
* [THRIFT-3776] - Go code from multiple thrift files with the same namespace
* [THRIFT-3823] - Escape documentation while generating non escaped documentation
* [THRIFT-3854] - allow users to clear read buffers
* [THRIFT-3859] - Unix Domain Socket Support in Objective-C
* [THRIFT-3921] - C++ code should print enums as strings
* [THRIFT-3926] - There should be an error emitted when http status code is not 200
* [THRIFT-4007] - Micro-optimization of TTransport.py
* [THRIFT-4040] - Add real cause of TNonblockingServerSocket error to exception
* [THRIFT-4064] - Update node library dependencies
* [THRIFT-4069] - All perl packages should have proper namespace, version syntax, and use proper thrift exceptions
* [THRIFT-4071] - Consolidate the Travis CI jobs where possible to put less stress on the Apache Foundation's allocation of CI build slaves
* [THRIFT-4072] - Add the possibility to send custom headers in TCurlClient
* [THRIFT-4075] - Better MinGW support for headers-only boost (without thread library)
* [THRIFT-4081] - Provide a MinGW 64-bit Appveyor CI build for better pull request validation
* [THRIFT-4084] - Improve SSL security in thrift by adding a make cross client that checks to make sure SSLv3 protocol cannot be negotiated
* [THRIFT-4095] - Add multiplexed protocol to Travis CI for make cross
* [THRIFT-4099] - Auto-derive Hash for generated Rust structs
* [THRIFT-4110] - The debian build files do not produce a "-dbg" package for debug symbols of libthrift0
* [THRIFT-4114] - Space after '///' in doc comments
* [THRIFT-4126] - Validate objects in php extension
* [THRIFT-4130] - Ensure Apache Http connection is released back to pool after use
* [THRIFT-4151] - Thrift Mutex Contention Profiling (pthreads) should be disabled by default
* [THRIFT-4176] - Implement a threaded and threadpool server type for Rust
* [THRIFT-4183] - Named pipe client blocks forever on Open() when there is no server at the other end
* [THRIFT-4190] - improve C# TThreadPoolServer defaults
* [THRIFT-4197] - Implement transparent gzip compression for HTTP transport
* [THRIFT-4198] - Ruby should log Thrift internal errors to global logger
* [THRIFT-4203] - thrift server stop gracefully
* [THRIFT-4205] - c_glib is not linking against glib + gobject
* [THRIFT-4209] - warning CS0414 in T[TLS]ServerSocket.cs
* [THRIFT-4210] - include Thrift.45.csproj into CI runs
* [THRIFT-4217] - HttpClient should support gzip and deflate
* [THRIFT-4222] - Support Unix Domain Sockets in Golang TServerSocket
* [THRIFT-4233] - Make THsHaServer.invoker available (get method only) in inherited classes
* [THRIFT-4236] - Support context in go generated code.
* [THRIFT-4238] - JSON generator: make annotation-aware
* [THRIFT-4269] - Don't append '.' to Erlang namespace if it ends in '_'.
* [THRIFT-4270] - Generate Erlang mapping functions for const maps and lists
* [THRIFT-4275] - Add support for zope.interface only, apart from twisted support.
* [THRIFT-4285] - Pull generated send/recv into library to allow behaviour to be customised
* [THRIFT-4287] - Add c++ compiler "no_skeleton" flag option
* [THRIFT-4288] - Implement logging levels properly for node.js
* [THRIFT-4295] - Refresh the Docker image file suite for Ubuntu, Debian, and CentOS
* [THRIFT-4305] - Emit ddoc for generated items
* [THRIFT-4306] - Thrift imports not replicated to D service output
* [THRIFT-4315] - Add default message for TApplicationException
* [THRIFT-4318] - Delphi performance improvements
* [THRIFT-4325] - Simplify automake cross compilation by relying on one global THRIFT compiler path
* [THRIFT-4327] - Improve TimerManager API to allow removing specific task
* [THRIFT-4330] - Allow unused crates in Rust files
* [THRIFT-4333] - Erlang tutorial examples are using a different port (9999)
* [THRIFT-4343] - Change CI builds to use node.js 8.x LTS once available
* [THRIFT-4345] - Create a docker build environment that uses the minimum supported language levels
* [THRIFT-4346] - Allow Zlib transport factory to wrap other transports
* [THRIFT-4348] - Perl HTTP Client custom HTTP headers
* [THRIFT-4350] - Update netcore build for dotnet 2.0 sdk and make cross validation
* [THRIFT-4351] - Use Travis CI Build Stages to optimize the CI build
* [THRIFT-4353] - cannot read via thrift_protocol at server side
* [THRIFT-4378] - add set stopTimeoutUnit method to TThreadPoolServer
## New Feature
* [THRIFT-750] - C++ Compiler Virtual Function Option
* [THRIFT-2945] - Implement support for Rust language
* [THRIFT-3857] - thrift js:node compiler support an object as parameter not an instance of struct
* [THRIFT-3933] - Port official C# .NET library for Thrift to C# .NET Core library
* [THRIFT-4039] - Update of Apache Thrift .Net Core lib
* [THRIFT-4113] - Provide a buffer transport for reading/writing in memory byte stream
## Question
* [THRIFT-2956] - autoconf - possibly undefined macro - AC_PROG_BISON
* [THRIFT-4223] - Add support to the isServing() method for the C++ library
## Task
* [THRIFT-3622] - Fix deprecated uses of std::auto_ptr
* [THRIFT-4028] - Please remove System.out.format from the source code
* [THRIFT-4186] - Build and test rust client in Travis
## Test
* [THRIFT-4264] - PHP - Support both shared & static linking of sockets library
## Wish
* [THRIFT-4344] - Define and maintain the minimum language level for all languages in one place
Thrift 0.10.0
--------------------------------------------------------------------------------
## Bug

View file

@@ -1,7 +1,7 @@
# Apache Thrift Language Support #
Last Modified: 2017-10-05<br>
Version: 0.10.0+
Version: 0.11.0+
Thrift supports many programming languages and has an impressive test suite that exercises most of the languages, protocols, and transports, representing a matrix of thousands of possible combinations. Each language typically has a minimum required version as well as support libraries - some mandatory and some optional. All of this information is provided below to help you assess whether you can use Apache Thrift with your project. Obviously this is a complex matrix to maintain and may not be correct in all cases - if you spot an error please inform the developers using the mailing list.
@@ -114,7 +114,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u
<tr align=center>
<td align=left><a href="lib/netcore/README.md">.NET Core</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td colspan=2>2.0.0</td>
<!-- Language Levels -------><td>2.0.0</td><td>2.0.3</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
@@ -194,7 +194,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u
<tr align=center>
<td align=left><a href="lib/lua/README.md">Lua</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>5.1.5</td><td>5.3.3</td>
<!-- Language Levels -------><td>5.1.5</td><td>5.2.4</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
@@ -204,7 +204,7 @@ The Language/Library Levels indicate the minimum and maximum versions that are u
<tr align=center>
<td align=left><a href="lib/nodejs/README.md">node.js</a></td>
<!-- Build Systems ---------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Language Levels -------><td>4.2.6</td><td>8.8.1</td>
<!-- Language Levels -------><td>4.2.6</td><td>8.9.1</td>
<!-- Low-Level Transports --><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cred.png" alt=""/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>
<!-- Transport Wrappers ----><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cred.png" alt=""/></td>
<!-- Protocols -------------><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td><td><img src="doc/images/cgrn.png" alt="Yes"/></td>

View file

@@ -111,9 +111,13 @@ EXTRA_DIST = \
.clang-format \
.editorconfig \
.travis.yml \
.rustfmt.toml \
.dockerignore \
appveyor.yml \
bower.json \
build \
bootstrap.sh \
cleanup.sh \
CMakeLists.txt \
composer.json \
contrib \

View file

@@ -1,7 +1,7 @@
Apache Thrift
=============
Last Modified: 2017--10
Last Modified: 2017-11-10
License
=======

View file

@@ -1,6 +1,6 @@
Pod::Spec.new do |s|
s.name = "Thrift"
s.version = "1.0.0"
s.version = "0.11.0"
s.summary = "Apache Thrift is a lightweight, language-independent software stack with an associated code generation mechanism for RPC."
s.description = <<-DESC
The Apache Thrift software framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml, Delphi, and other languages.
@@ -13,6 +13,6 @@ The Apache Thrift software framework, for scalable cross-language services devel
s.osx.deployment_target = '10.8'
s.ios.framework = 'CFNetwork'
s.osx.framework = 'CoreServices'
s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-1.0.0" }
s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-0.11.0" }
s.source_files = 'lib/cocoa/src/**/*.{h,m,swift}'
end
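With the tag bumped from thrift-1.0.0 to thrift-0.11.0, the podspec now points at a tag that actually exists for this release. A sketch of fetching the same sources directly:

# Clone exactly what the updated podspec resolves to.
git clone --depth 1 --branch thrift-0.11.0 https://github.com/apache/thrift.git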

View file

@@ -1,54 +0,0 @@
dnl
dnl Check Bison version
dnl AC_PROG_BISON([MIN_VERSION=2.4])
dnl
dnl Will define BISON_USE_PARSER_H_EXTENSION if Automake is < 1.11
dnl for use with .h includes.
dnl
AC_DEFUN([AC_PROG_BISON], [
if test "x$1" = "x" ; then
bison_required_version="2.4"
else
bison_required_version="$1"
fi
AC_CHECK_PROG(have_prog_bison, [bison], [yes],[no])
AC_DEFINE_UNQUOTED([BISON_VERSION], [0.0], [Bison version if bison is not available])
#Do not use *.h extension for parser header files, use newer *.hh
bison_use_parser_h_extension=false
if test "$have_prog_bison" = "yes" ; then
AC_MSG_CHECKING([for bison version >= $bison_required_version])
bison_version=`bison --version | head -n 1 | cut '-d ' -f 4`
AC_DEFINE_UNQUOTED([BISON_VERSION], [$bison_version], [Defines bison version])
if test "$bison_version" \< "$bison_required_version" ; then
BISON=:
AC_MSG_RESULT([no])
AC_MSG_ERROR([Bison version $bison_required_version or higher must be installed on the system!])
else
AC_MSG_RESULT([yes])
BISON=bison
AC_SUBST(BISON)
#Verify automake version 1.11 headers for yy files are .h, > 1.12 uses .hh
automake_version=`automake --version | head -n 1 | cut '-d ' -f 4`
AC_DEFINE_UNQUOTED([AUTOMAKE_VERSION], [$automake_version], [Defines automake version])
if test "$automake_version" \< "1.12" ; then
#Use *.h extension for parser header file
bison_use_parser_h_extension=true
echo "Automake version < 1.12"
AC_DEFINE([BISON_USE_PARSER_H_EXTENSION], [1], [Use *.h extension for parser header file])
fi
fi
else
BISON=:
AC_MSG_RESULT([NO])
fi
AM_CONDITIONAL([BISON_USE_PARSER_H_EXTENSION], [test x$bison_use_parser_h_extension = xtrue])
AC_SUBST(BISON)
])
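Note that the version gate above, test "$bison_version" \< "$bison_required_version", is a lexicographic string comparison (the weakness tracked as THRIFT-4109 in the changelog above), so a hypothetical bison 10.x would compare as older than 2.4. A standalone sketch of the same probe outside autoconf:

#!/bin/sh
# Extract the bison version exactly the way the macro does and apply the
# same (string-based) comparison against the 2.4 default.
bison_version=`bison --version | head -n 1 | cut '-d ' -f 4`
if test "$bison_version" \< "2.4"; then
    echo "bison $bison_version is too old (need >= 2.4)" >&2
else
    echo "bison $bison_version is new enough"
fi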

View file

@@ -1,301 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
#
# DESCRIPTION
#
# Test for the Boost C++ libraries of a particular version (or newer)
#
# If no path to the installed boost library is given, the macro searches
# under /usr, /usr/local, /opt and /opt/local and evaluates the
# $BOOST_ROOT environment variable. Further documentation is available at
# <http://randspringer.de/boost/index.html>.
#
# This macro calls:
#
# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS)
#
# And sets:
#
# HAVE_BOOST
#
# LICENSE
#
# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
# Copyright (c) 2009 Peter Adolphs
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 42
# example boost program (need to pass version)
m4_define([_AX_BOOST_BASE_PROGRAM],
[AC_LANG_PROGRAM([[
#include <boost/version.hpp>
]],[[
(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($1))]));
]])])
AC_DEFUN([AX_BOOST_BASE],
[
AC_ARG_WITH([boost],
[AS_HELP_STRING([--with-boost@<:@=ARG@:>@],
[use Boost library from a standard location (ARG=yes),
from the specified location (ARG=<path>),
or disable it (ARG=no)
@<:@ARG=yes@:>@ ])],
[
AS_CASE([$withval],
[no],[want_boost="no";_AX_BOOST_BASE_boost_path=""],
[yes],[want_boost="yes";_AX_BOOST_BASE_boost_path=""],
[want_boost="yes";_AX_BOOST_BASE_boost_path="$withval"])
],
[want_boost="yes"])
AC_ARG_WITH([boost-libdir],
[AS_HELP_STRING([--with-boost-libdir=LIB_DIR],
[Force given directory for boost libraries.
Note that this will override library path detection,
so use this parameter only if default library detection fails
and you know exactly where your boost libraries are located.])],
[
AS_IF([test -d "$withval"],
[_AX_BOOST_BASE_boost_lib_path="$withval"],
[AC_MSG_ERROR([--with-boost-libdir expected directory name])])
],
[_AX_BOOST_BASE_boost_lib_path=""])
BOOST_LDFLAGS=""
BOOST_CPPFLAGS=""
AS_IF([test "x$want_boost" = "xyes"],
[_AX_BOOST_BASE_RUNDETECT([$1],[$2],[$3])])
AC_SUBST(BOOST_CPPFLAGS)
AC_SUBST(BOOST_LDFLAGS)
])
# convert a version string in $2 to numeric and affect to polymorphic var $1
AC_DEFUN([_AX_BOOST_BASE_TONUMERICVERSION],[
AS_IF([test "x$2" = "x"],[_AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0"],[_AX_BOOST_BASE_TONUMERICVERSION_req="$2"])
_AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\.[[0-9]]*\)'`
_AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\)'`
AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"],
[AC_MSG_ERROR([You should at least specify libboost major version])])
_AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.\([[0-9]]*\)'`
AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"],
[_AX_BOOST_BASE_TONUMERICVERSION_req_minor="0"])
_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
AS_IF([test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"],
[_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0"])
_AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor`
AS_VAR_SET($1,$_AX_BOOST_BASE_TONUMERICVERSION_RET)
])
dnl The detection of boost should be run only if $want_boost
AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[
_AX_BOOST_BASE_TONUMERICVERSION(WANT_BOOST_VERSION,[$1])
succeeded=no
AC_REQUIRE([AC_CANONICAL_HOST])
dnl On 64-bit systems check for system libraries in both lib64 and lib.
dnl The former is specified by FHS, but e.g. Debian does not adhere to
dnl this (as it raises problems for generic multi-arch support).
dnl The last entry in the list is chosen by default when no libraries
dnl are found, e.g. when only header-only libraries are installed!
AS_CASE([${host_cpu}],
[x86_64],[libsubdirs="lib64 libx32 lib lib64"],
[ppc64|s390x|sparc64|aarch64|ppc64le],[libsubdirs="lib64 lib lib64"],
[libsubdirs="lib"],
)
dnl allow for real multi-arch paths e.g. /usr/lib/x86_64-linux-gnu. Give
dnl them priority over the other paths since, if libs are found there, they
dnl are almost assuredly the ones desired.
AS_CASE([${host_cpu}],
[i?86],[multiarch_libsubdir="lib/i386-${host_os}"],
[multiarch_libsubdir="lib/${host_cpu}-${host_os}"]
)
dnl first we check the system location for boost libraries
dnl this location is chosen if boost libraries are installed with the --layout=system option
dnl or if you install boost with RPM
AS_IF([test "x$_AX_BOOST_BASE_boost_path" != "x"],[
AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) includes in "$_AX_BOOST_BASE_boost_path/include"])
AS_IF([test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"],[
AC_MSG_RESULT([yes])
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include"
for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do
AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) lib path in "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"])
AS_IF([test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ],[
AC_MSG_RESULT([yes])
BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp";
break;
],
[AC_MSG_RESULT([no])])
done],[
AC_MSG_RESULT([no])])
],[
if test X"$cross_compiling" = Xyes; then
search_libsubdirs=$multiarch_libsubdir
else
search_libsubdirs="$multiarch_libsubdir $libsubdirs"
fi
for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local ; do
if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then
for libsubdir in $search_libsubdirs ; do
if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
done
BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir"
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include"
break;
fi
done
])
dnl overwrite ld flags if we have required special directory with
dnl --with-boost-libdir parameter
AS_IF([test "x$_AX_BOOST_BASE_boost_lib_path" != "x"],
[BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path"])
AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION)])
CPPFLAGS_SAVED="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
export CPPFLAGS
LDFLAGS_SAVED="$LDFLAGS"
LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
export LDFLAGS
AC_REQUIRE([AC_PROG_CXX])
AC_LANG_PUSH(C++)
AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[
AC_MSG_RESULT(yes)
succeeded=yes
found_system=yes
],[
])
AC_LANG_POP([C++])
dnl if we found no boost with system layout we search for boost libraries
dnl built and installed without the --layout=system option or for a staged(not installed) version
if test "x$succeeded" != "xyes" ; then
CPPFLAGS="$CPPFLAGS_SAVED"
LDFLAGS="$LDFLAGS_SAVED"
BOOST_CPPFLAGS=
if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
BOOST_LDFLAGS=
fi
_version=0
if test -n "$_AX_BOOST_BASE_boost_path" ; then
if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then
for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do
_version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
V_CHECK=`expr $_version_tmp \> $_version`
if test "x$V_CHECK" = "x1" ; then
_version=$_version_tmp
fi
VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE"
done
dnl if nothing found search for layout used in Windows distributions
if test -z "$BOOST_CPPFLAGS"; then
if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then
BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path"
fi
fi
dnl if we found something and BOOST_LDFLAGS was unset before
dnl (because "$_AX_BOOST_BASE_boost_lib_path" = ""), set it here.
if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then
for libsubdir in $libsubdirs ; do
if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
done
BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir"
fi
fi
else
if test "x$cross_compiling" != "xyes" ; then
for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local ; do
if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then
for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do
_version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'`
V_CHECK=`expr $_version_tmp \> $_version`
if test "x$V_CHECK" = "x1" ; then
_version=$_version_tmp
best_path=$_AX_BOOST_BASE_boost_path
fi
done
fi
done
VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'`
BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE"
if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
for libsubdir in $libsubdirs ; do
if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
done
BOOST_LDFLAGS="-L$best_path/$libsubdir"
fi
fi
if test -n "$BOOST_ROOT" ; then
for libsubdir in $libsubdirs ; do
if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi
done
if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then
version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'`
stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'`
stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'`
V_CHECK=`expr $stage_version_shorten \>\= $_version`
if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then
AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT)
BOOST_CPPFLAGS="-I$BOOST_ROOT"
BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir"
fi
fi
fi
fi
CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
export CPPFLAGS
LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
export LDFLAGS
AC_LANG_PUSH(C++)
AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[
AC_MSG_RESULT(yes)
succeeded=yes
found_system=yes
],[
])
AC_LANG_POP([C++])
fi
if test "x$succeeded" != "xyes" ; then
if test "x$_version" = "x0" ; then
AC_MSG_NOTICE([[We could not detect the boost libraries (version $1 or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in <boost/version.hpp>. See http://randspringer.de/boost for more documentation.]])
else
AC_MSG_NOTICE([Your boost libraries seem to be too old (version $_version).])
fi
# execute ACTION-IF-NOT-FOUND (if present):
ifelse([$3], , :, [$3])
else
AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available])
# execute ACTION-IF-FOUND (if present):
ifelse([$2], , :, [$2])
fi
CPPFLAGS="$CPPFLAGS_SAVED"
LDFLAGS="$LDFLAGS_SAVED"
])
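Underneath all the path probing, the acceptance test is a single compile of _AX_BOOST_BASE_PROGRAM: does BOOST_VERSION from boost/version.hpp meet the numeric minimum (major * 100000 + minor * 100 + sub-minor, as computed by _AX_BOOST_BASE_TONUMERICVERSION above)? A sketch of that core check outside autoconf (assumes Boost headers and a C++ compiler named c++ are installed):

#!/bin/sh
# Ask the preprocessor for BOOST_VERSION and compare it numerically.
want=106200    # 1.62.0 in the macro's encoding
have=`printf '#include <boost/version.hpp>\nBOOST_VERSION\n' | c++ -E -x c++ - 2>/dev/null | tail -n 1`
if test "$have" -ge "$want" 2>/dev/null; then
    echo "boost $have is new enough"
else
    echo "boost too old or not found (got '$have', need >= $want)" >&2
fi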

View file

@@ -1,124 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_check_openssl.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_CHECK_OPENSSL([action-if-found[, action-if-not-found]])
#
# DESCRIPTION
#
# Look for OpenSSL in a number of default spots, or in a user-selected
# spot (via --with-openssl). Sets
#
# OPENSSL_INCLUDES to the include directives required
# OPENSSL_LIBS to the -l directives required
# OPENSSL_LDFLAGS to the -L or -R flags required
#
# and calls ACTION-IF-FOUND or ACTION-IF-NOT-FOUND appropriately
#
# This macro sets OPENSSL_INCLUDES such that source files should use the
# openssl/ directory in include directives:
#
# #include <openssl/hmac.h>
#
# LICENSE
#
# Copyright (c) 2009,2010 Zmanda Inc. <http://www.zmanda.com/>
# Copyright (c) 2009,2010 Dustin J. Mitchell <dustin@zmanda.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 10
AU_ALIAS([CHECK_SSL], [AX_CHECK_OPENSSL])
AC_DEFUN([AX_CHECK_OPENSSL], [
found=false
AC_ARG_WITH([openssl],
[AS_HELP_STRING([--with-openssl=DIR],
[root of the OpenSSL directory])],
[
case "$withval" in
"" | y | ye | yes | n | no)
AC_MSG_ERROR([Invalid --with-openssl value])
;;
*) ssldirs="$withval"
;;
esac
], [
# if pkg-config is installed and openssl has installed a .pc file,
# then use that information and don't search ssldirs
AC_CHECK_TOOL([PKG_CONFIG], [pkg-config])
if test x"$PKG_CONFIG" != x""; then
OPENSSL_LDFLAGS=`$PKG_CONFIG openssl --libs-only-L 2>/dev/null`
if test $? = 0; then
OPENSSL_LIBS=`$PKG_CONFIG openssl --libs-only-l 2>/dev/null`
OPENSSL_INCLUDES=`$PKG_CONFIG openssl --cflags-only-I 2>/dev/null`
found=true
fi
fi
# no such luck; use some default ssldirs
if ! $found; then
ssldirs="/usr/local/ssl /usr/lib/ssl /usr/ssl /usr/pkg /usr/local /usr"
fi
]
)
# note that we #include <openssl/foo.h>, so the OpenSSL headers have to be in
# an 'openssl' subdirectory
if ! $found; then
OPENSSL_INCLUDES=
for ssldir in $ssldirs; do
AC_MSG_CHECKING([for openssl/ssl.h in $ssldir])
if test -f "$ssldir/include/openssl/ssl.h"; then
OPENSSL_INCLUDES="-I$ssldir/include"
OPENSSL_LDFLAGS="-L$ssldir/lib"
OPENSSL_LIBS="-lssl -lcrypto"
found=true
AC_MSG_RESULT([yes])
break
else
AC_MSG_RESULT([no])
fi
done
# if the file wasn't found, well, go ahead and try the link anyway -- maybe
# it will just work!
fi
# try the preprocessor and linker with our new flags,
# being careful not to pollute the global LIBS, LDFLAGS, and CPPFLAGS
AC_MSG_CHECKING([whether compiling and linking against OpenSSL works])
echo "Trying link with OPENSSL_LDFLAGS=$OPENSSL_LDFLAGS;" \
"OPENSSL_LIBS=$OPENSSL_LIBS; OPENSSL_INCLUDES=$OPENSSL_INCLUDES" >&AS_MESSAGE_LOG_FD
save_LIBS="$LIBS"
save_LDFLAGS="$LDFLAGS"
save_CPPFLAGS="$CPPFLAGS"
LDFLAGS="$LDFLAGS $OPENSSL_LDFLAGS"
LIBS="$OPENSSL_LIBS $LIBS"
CPPFLAGS="$OPENSSL_INCLUDES $CPPFLAGS"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([#include <openssl/ssl.h>], [SSL_new(NULL)])],
[
AC_MSG_RESULT([yes])
$1
], [
AC_MSG_RESULT([no])
$2
])
CPPFLAGS="$save_CPPFLAGS"
LDFLAGS="$save_LDFLAGS"
LIBS="$save_LIBS"
AC_SUBST([OPENSSL_INCLUDES])
AC_SUBST([OPENSSL_LIBS])
AC_SUBST([OPENSSL_LDFLAGS])
])
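The macro tries pkg-config first and only falls back to walking ssldirs when no openssl.pc is found. A sketch of that fast path as a plain script, pulling the same three variable groups it exports:

#!/bin/sh
# Mirror the macro's pkg-config branch: OPENSSL_LDFLAGS / _LIBS / _INCLUDES.
if pkg-config --exists openssl 2>/dev/null; then
    OPENSSL_LDFLAGS=`pkg-config openssl --libs-only-L`
    OPENSSL_LIBS=`pkg-config openssl --libs-only-l`
    OPENSSL_INCLUDES=`pkg-config openssl --cflags-only-I`
    echo "LDFLAGS=$OPENSSL_LDFLAGS LIBS=$OPENSSL_LIBS INCLUDES=$OPENSSL_INCLUDES"
else
    echo "no openssl.pc found; the macro would probe ssldirs instead" >&2
fi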

View file

@@ -1,177 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_compare_version.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
#
# DESCRIPTION
#
# This macro compares two version strings. Due to the various number of
# minor-version numbers that can exist, and the fact that string
# comparisons are not compatible with numeric comparisons, this is not
# necessarily trivial to do in an autoconf script. This macro makes doing
# these comparisons easy.
#
# The six basic comparisons are available, as well as checking equality
# limited to a certain number of minor-version levels.
#
# The operator OP determines what type of comparison to do, and can be one
# of:
#
# eq - equal (test A == B)
# ne - not equal (test A != B)
# le - less than or equal (test A <= B)
# ge - greater than or equal (test A >= B)
# lt - less than (test A < B)
# gt - greater than (test A > B)
#
# Additionally, the eq and ne operator can have a number after it to limit
# the test to that number of minor versions.
#
# eq0 - equal up to the length of the shorter version
# ne0 - not equal up to the length of the shorter version
# eqN - equal up to N sub-version levels
# neN - not equal up to N sub-version levels
#
# When the condition is true, shell commands ACTION-IF-TRUE are run,
# otherwise shell commands ACTION-IF-FALSE are run. The environment
# variable 'ax_compare_version' is always set to either 'true' or 'false'
# as well.
#
# Examples:
#
# AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8])
# AX_COMPARE_VERSION([3.15],[lt],[3.15.8])
#
# would both be true.
#
# AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8])
# AX_COMPARE_VERSION([3.15],[gt],[3.15.8])
#
# would both be false.
#
# AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8])
#
# would be true because it is only comparing two minor versions.
#
# AX_COMPARE_VERSION([3.15.7],[eq0],[3.15])
#
# would be true because it is only comparing the lesser number of minor
# versions of the two values.
#
# Note: The characters that separate the version numbers do not matter. An
# empty string is the same as version 0. OP is evaluated by autoconf, not
# configure, so must be a string, not a variable.
#
# The author would like to acknowledge Guido Draheim whose advice about
# the m4_case and m4_ifvaln functions makes this macro only include the
# portions necessary to perform the specific comparison specified by the
# OP argument in the final configure script.
#
# LICENSE
#
# Copyright (c) 2008 Tim Toolan <toolan@ele.uri.edu>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 12
dnl #########################################################################
AC_DEFUN([AX_COMPARE_VERSION], [
AC_REQUIRE([AC_PROG_AWK])
# Used to indicate true or false condition
ax_compare_version=false
# Convert the two version strings to be compared into a format that
# allows a simple string comparison. The end result is that a version
# string of the form 1.12.5-r617 will be converted to the form
# 0001001200050617. In other words, each number is zero padded to four
# digits, and non digits are removed.
AS_VAR_PUSHDEF([A],[ax_compare_version_A])
A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
AS_VAR_PUSHDEF([B],[ax_compare_version_B])
B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \
-e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \
-e 's/[[^0-9]]//g'`
dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary
dnl # then the first line is used to determine if the condition is true.
dnl # The sed right after the echo is to remove any indented white space.
m4_case(m4_tolower($2),
[lt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[gt],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"`
],
[le],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],
[ge],[
ax_compare_version=`echo "x$A
x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"`
],[
dnl Split the operator from the subversion count if present.
m4_bmatch(m4_substr($2,2),
[0],[
# A count of zero means use the length of the shorter version.
# Determine the number of characters in A and B.
ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'`
ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'`
# Set A to no more than B's length and B to no more than A's length.
A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"`
B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"`
],
[[0-9]+],[
# A count greater than zero means use only that many subversions
A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"`
],
[.+],[
AC_WARNING(
[illegal OP numeric parameter: $2])
],[])
# Pad zeros at end of numbers to make same length.
ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`"
B="$B`echo $A | sed 's/./0/g'`"
A="$ax_compare_version_tmp_A"
# Check for equality or inequality as necessary.
m4_case(m4_tolower(m4_substr($2,0,2)),
[eq],[
test "x$A" = "x$B" && ax_compare_version=true
],
[ne],[
test "x$A" != "x$B" && ax_compare_version=true
],[
AC_WARNING([illegal OP parameter: $2])
])
])
AS_VAR_POPDEF([A])dnl
AS_VAR_POPDEF([B])dnl
dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE.
if test "$ax_compare_version" = "true" ; then
m4_ifvaln([$4],[$4],[:])dnl
m4_ifvaln([$5],[else $5])dnl
fi
]) dnl AX_COMPARE_VERSION
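The whole trick is the normalization: zero-padding every number group to four digits turns 1.12.5-r617 into 0001001200050617, after which plain string ordering is version ordering. A sketch of the same normalize-then-compare in plain sh (single brackets here; the doubled [[0-9]] above is m4 quoting):

#!/bin/sh
# Normalize a version string the way AX_COMPARE_VERSION does.
norm() {
    echo "$1" | sed -e 's/\([0-9]*\)/Z\1Z/g' \
                    -e 's/Z\([0-9]\)Z/Z0\1Z/g' \
                    -e 's/Z\([0-9][0-9]\)Z/Z0\1Z/g' \
                    -e 's/Z\([0-9][0-9][0-9]\)Z/Z0\1Z/g' \
                    -e 's/[^0-9]//g'
}
a=`norm 3.15.7`; b=`norm 3.15.10`
lo=`printf '%s\n%s\n' "$a" "$b" | sort | head -n 1`
if test "$lo" = "$a" && test "$a" != "$b"; then
    echo "3.15.7 < 3.15.10 (a naive string test on the raw versions gets this wrong)"
fi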

View file

@@ -1,982 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
#
# DESCRIPTION
#
# Check for baseline language coverage in the compiler for the specified
# version of the C++ standard. If necessary, add switches to CXX and
# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
# or '14' (for the C++14 standard).
#
# The second argument, if specified, indicates whether you insist on an
# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
# -std=c++11). If neither is specified, you get whatever works, with
# preference for an extended mode.
#
# The third argument, if specified 'mandatory' or if left unspecified,
# indicates that baseline support for the specified C++ standard is
# required and that the macro should error out if no mode with that
# support is found. If specified 'optional', then configuration proceeds
# regardless, after defining HAVE_CXX${VERSION} if and only if a
# supporting mode is found.
#
# LICENSE
#
# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
# Copyright (c) 2015 Paul Norman <penorman@mac.com>
# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
# Copyright (c) 2016 Krzesimir Nowak <qdlacz@gmail.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 7
dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
dnl (serial version number 13).
AX_REQUIRE_DEFINED([AC_MSG_WARN])
AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
[$1], [14], [ax_cxx_compile_alternatives="14 1y"],
[$1], [17], [ax_cxx_compile_alternatives="17 1z"],
[m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
m4_if([$2], [], [],
[$2], [ext], [],
[$2], [noext], [],
[m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
[$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
[$3], [optional], [ax_cxx_compile_cxx$1_required=false],
[m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
AC_LANG_PUSH([C++])dnl
ac_success=no
AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
ax_cv_cxx_compile_cxx$1,
[AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
[ax_cv_cxx_compile_cxx$1=yes],
[ax_cv_cxx_compile_cxx$1=no])])
if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
ac_success=yes
fi
m4_if([$2], [noext], [], [dnl
if test x$ac_success = xno; then
for alternative in ${ax_cxx_compile_alternatives}; do
switch="-std=gnu++${alternative}"
cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
$cachevar,
[ac_save_CXX="$CXX"
CXX="$CXX $switch"
AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
[eval $cachevar=yes],
[eval $cachevar=no])
CXX="$ac_save_CXX"])
if eval test x\$$cachevar = xyes; then
CXX="$CXX $switch"
if test -n "$CXXCPP" ; then
CXXCPP="$CXXCPP $switch"
fi
ac_success=yes
break
fi
done
fi])
m4_if([$2], [ext], [], [dnl
if test x$ac_success = xno; then
dnl HP's aCC needs +std=c++11 according to:
dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
dnl Cray's crayCC needs "-h std=c++11"
for alternative in ${ax_cxx_compile_alternatives}; do
for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
$cachevar,
[ac_save_CXX="$CXX"
CXX="$CXX $switch"
AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
[eval $cachevar=yes],
[eval $cachevar=no])
CXX="$ac_save_CXX"])
if eval test x\$$cachevar = xyes; then
CXX="$CXX $switch"
if test -n "$CXXCPP" ; then
CXXCPP="$CXXCPP $switch"
fi
ac_success=yes
break
fi
done
if test x$ac_success = xyes; then
break
fi
done
fi])
AC_LANG_POP([C++])
if test x$ax_cxx_compile_cxx$1_required = xtrue; then
if test x$ac_success = xno; then
AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
fi
fi
if test x$ac_success = xno; then
HAVE_CXX$1=0
AC_MSG_NOTICE([No compiler with C++$1 support was found])
else
HAVE_CXX$1=1
AC_DEFINE(HAVE_CXX$1,1,
[define if the compiler supports basic C++$1 syntax])
fi
AC_SUBST(HAVE_CXX$1)
m4_if([$1], [17], [AC_MSG_WARN([C++17 is not yet standardized, so the checks may change in incompatible ways anytime])])
])
dnl Test body for checking C++11 support
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
_AX_CXX_COMPILE_STDCXX_testbody_new_in_11
)
dnl Test body for checking C++14 support
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
_AX_CXX_COMPILE_STDCXX_testbody_new_in_11
_AX_CXX_COMPILE_STDCXX_testbody_new_in_14
)
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
_AX_CXX_COMPILE_STDCXX_testbody_new_in_11
_AX_CXX_COMPILE_STDCXX_testbody_new_in_14
_AX_CXX_COMPILE_STDCXX_testbody_new_in_17
)
dnl Tests for new features in C++11
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
// If the compiler admits that it is not ready for C++11, why torture it?
// Hopefully, this will speed up the test.
#ifndef __cplusplus
#error "This is not a C++ compiler"
#elif __cplusplus < 201103L
#error "This is not a C++11 compiler"
#else
namespace cxx11
{
namespace test_static_assert
{
template <typename T>
struct check
{
static_assert(sizeof(int) <= sizeof(T), "not big enough");
};
}
namespace test_final_override
{
struct Base
{
virtual void f() {}
};
struct Derived : public Base
{
virtual void f() override {}
};
}
namespace test_double_right_angle_brackets
{
template < typename T >
struct check {};
typedef check<void> single_type;
typedef check<check<void>> double_type;
typedef check<check<check<void>>> triple_type;
typedef check<check<check<check<void>>>> quadruple_type;
}
namespace test_decltype
{
int
f()
{
int a = 1;
decltype(a) b = 2;
return a + b;
}
}
namespace test_type_deduction
{
template < typename T1, typename T2 >
struct is_same
{
static const bool value = false;
};
template < typename T >
struct is_same<T, T>
{
static const bool value = true;
};
template < typename T1, typename T2 >
auto
add(T1 a1, T2 a2) -> decltype(a1 + a2)
{
return a1 + a2;
}
int
test(const int c, volatile int v)
{
static_assert(is_same<int, decltype(0)>::value == true, "");
static_assert(is_same<int, decltype(c)>::value == false, "");
static_assert(is_same<int, decltype(v)>::value == false, "");
auto ac = c;
auto av = v;
auto sumi = ac + av + 'x';
auto sumf = ac + av + 1.0;
static_assert(is_same<int, decltype(ac)>::value == true, "");
static_assert(is_same<int, decltype(av)>::value == true, "");
static_assert(is_same<int, decltype(sumi)>::value == true, "");
static_assert(is_same<int, decltype(sumf)>::value == false, "");
static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
return (sumf > 0.0) ? sumi : add(c, v);
}
}
namespace test_noexcept
{
int f() { return 0; }
int g() noexcept { return 0; }
static_assert(noexcept(f()) == false, "");
static_assert(noexcept(g()) == true, "");
}
namespace test_constexpr
{
template < typename CharT >
unsigned long constexpr
strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
{
return *s ? strlen_c_r(s + 1, acc + 1) : acc;
}
template < typename CharT >
unsigned long constexpr
strlen_c(const CharT *const s) noexcept
{
return strlen_c_r(s, 0UL);
}
static_assert(strlen_c("") == 0UL, "");
static_assert(strlen_c("1") == 1UL, "");
static_assert(strlen_c("example") == 7UL, "");
static_assert(strlen_c("another\0example") == 7UL, "");
}
namespace test_rvalue_references
{
template < int N >
struct answer
{
static constexpr int value = N;
};
answer<1> f(int&) { return answer<1>(); }
answer<2> f(const int&) { return answer<2>(); }
answer<3> f(int&&) { return answer<3>(); }
void
test()
{
int i = 0;
const int c = 0;
static_assert(decltype(f(i))::value == 1, "");
static_assert(decltype(f(c))::value == 2, "");
static_assert(decltype(f(0))::value == 3, "");
}
}
namespace test_uniform_initialization
{
struct test
{
static const int zero {};
static const int one {1};
};
static_assert(test::zero == 0, "");
static_assert(test::one == 1, "");
}
namespace test_lambdas
{
void
test1()
{
auto lambda1 = [](){};
auto lambda2 = lambda1;
lambda1();
lambda2();
}
int
test2()
{
auto a = [](int i, int j){ return i + j; }(1, 2);
auto b = []() -> int { return '0'; }();
auto c = [=](){ return a + b; }();
auto d = [&](){ return c; }();
auto e = [a, &b](int x) mutable {
const auto identity = [](int y){ return y; };
for (auto i = 0; i < a; ++i)
a += b--;
return x + identity(a + b);
}(0);
return a + b + c + d + e;
}
int
test3()
{
const auto nullary = [](){ return 0; };
const auto unary = [](int x){ return x; };
using nullary_t = decltype(nullary);
using unary_t = decltype(unary);
const auto higher1st = [](nullary_t f){ return f(); };
const auto higher2nd = [unary](nullary_t f1){
return [unary, f1](unary_t f2){ return f2(unary(f1())); };
};
return higher1st(nullary) + higher2nd(nullary)(unary);
}
}
namespace test_variadic_templates
{
template <int...>
struct sum;
template <int N0, int... N1toN>
struct sum<N0, N1toN...>
{
static constexpr auto value = N0 + sum<N1toN...>::value;
};
template <>
struct sum<>
{
static constexpr auto value = 0;
};
static_assert(sum<>::value == 0, "");
static_assert(sum<1>::value == 1, "");
static_assert(sum<23>::value == 23, "");
static_assert(sum<1, 2>::value == 3, "");
static_assert(sum<5, 5, 11>::value == 21, "");
static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
}
// http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
// Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
// because of this.
namespace test_template_alias_sfinae
{
struct foo {};
template<typename T>
using member = typename T::member_type;
template<typename T>
void func(...) {}
template<typename T>
void func(member<T>*) {}
void test();
void test() { func<foo>(0); }
}
} // namespace cxx11
#endif // __cplusplus >= 201103L
]])
dnl Tests for new features in C++14
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
// If the compiler admits that it is not ready for C++14, why torture it?
// Hopefully, this will speed up the test.
#ifndef __cplusplus
#error "This is not a C++ compiler"
#elif __cplusplus < 201402L
#error "This is not a C++14 compiler"
#else
namespace cxx14
{
namespace test_polymorphic_lambdas
{
int
test()
{
const auto lambda = [](auto&&... args){
const auto istiny = [](auto x){
return (sizeof(x) == 1UL) ? 1 : 0;
};
const int aretiny[] = { istiny(args)... };
return aretiny[0];
};
return lambda(1, 1L, 1.0f, '1');
}
}
namespace test_binary_literals
{
constexpr auto ivii = 0b0000000000101010;
static_assert(ivii == 42, "wrong value");
}
namespace test_generalized_constexpr
{
template < typename CharT >
constexpr unsigned long
strlen_c(const CharT *const s) noexcept
{
auto length = 0UL;
for (auto p = s; *p; ++p)
++length;
return length;
}
static_assert(strlen_c("") == 0UL, "");
static_assert(strlen_c("x") == 1UL, "");
static_assert(strlen_c("test") == 4UL, "");
static_assert(strlen_c("another\0test") == 7UL, "");
}
namespace test_lambda_init_capture
{
int
test()
{
auto x = 0;
const auto lambda1 = [a = x](int b){ return a + b; };
const auto lambda2 = [a = lambda1(x)](){ return a; };
return lambda2();
}
}
namespace test_digit_separators
{
constexpr auto hundred_million = 100'000'000;
static_assert(hundred_million == 100000000, "");
}
namespace test_return_type_deduction
{
auto f(int& x) { return x; }
decltype(auto) g(int& x) { return x; }
template < typename T1, typename T2 >
struct is_same
{
static constexpr auto value = false;
};
template < typename T >
struct is_same<T, T>
{
static constexpr auto value = true;
};
int
test()
{
auto x = 0;
static_assert(is_same<int, decltype(f(x))>::value, "");
static_assert(is_same<int&, decltype(g(x))>::value, "");
return x;
}
}
} // namespace cxx14
#endif // __cplusplus >= 201402L
]])
dnl Tests for new features in C++17
m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
// If the compiler admits that it is not ready for C++17, why torture it?
// Hopefully, this will speed up the test.
#ifndef __cplusplus
#error "This is not a C++ compiler"
#elif __cplusplus <= 201402L
#error "This is not a C++17 compiler"
#else
#if defined(__clang__)
#define REALLY_CLANG
#else
#if defined(__GNUC__)
#define REALLY_GCC
#endif
#endif
#include <initializer_list>
#include <utility>
#include <type_traits>
namespace cxx17
{
#if !defined(REALLY_CLANG)
namespace test_constexpr_lambdas
{
// TODO: test it with clang++ from git
constexpr int foo = [](){return 42;}();
}
#endif // !defined(REALLY_CLANG)
namespace test::nested_namespace::definitions
{
}
namespace test_fold_expression
{
template<typename... Args>
int multiply(Args... args)
{
return (args * ... * 1);
}
template<typename... Args>
bool all(Args... args)
{
return (args && ...);
}
}
namespace test_extended_static_assert
{
static_assert (true);
}
namespace test_auto_brace_init_list
{
auto foo = {5};
auto bar {5};
static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
static_assert(std::is_same<int, decltype(bar)>::value);
}
namespace test_typename_in_template_template_parameter
{
template<template<typename> typename X> struct D;
}
namespace test_fallthrough_nodiscard_maybe_unused_attributes
{
int f1()
{
return 42;
}
[[nodiscard]] int f2()
{
[[maybe_unused]] auto unused = f1();
switch (f1())
{
case 17:
f1();
[[fallthrough]];
case 42:
f1();
}
return f1();
}
}
namespace test_extended_aggregate_initialization
{
struct base1
{
int b1, b2 = 42;
};
struct base2
{
base2() {
b3 = 42;
}
int b3;
};
struct derived : base1, base2
{
int d;
};
derived d1 {{1, 2}, {}, 4}; // full initialization
derived d2 {{}, {}, 4}; // value-initialized bases
}
namespace test_general_range_based_for_loop
{
struct iter
{
int i;
int& operator* ()
{
return i;
}
const int& operator* () const
{
return i;
}
iter& operator++()
{
++i;
return *this;
}
};
struct sentinel
{
int i;
};
bool operator== (const iter& i, const sentinel& s)
{
return i.i == s.i;
}
bool operator!= (const iter& i, const sentinel& s)
{
return !(i == s);
}
struct range
{
iter begin() const
{
return {0};
}
sentinel end() const
{
return {5};
}
};
void f()
{
range r {};
for (auto i : r)
{
[[maybe_unused]] auto v = i;
}
}
}
namespace test_lambda_capture_asterisk_this_by_value
{
struct t
{
int i;
int foo()
{
return [*this]()
{
return i;
}();
}
};
}
namespace test_enum_class_construction
{
enum class byte : unsigned char
{};
byte foo {42};
}
namespace test_constexpr_if
{
template <bool cond>
int f ()
{
if constexpr(cond)
{
return 13;
}
else
{
return 42;
}
}
}
namespace test_selection_statement_with_initializer
{
int f()
{
return 13;
}
int f2()
{
if (auto i = f(); i > 0)
{
return 3;
}
switch (auto i = f(); i + 4)
{
case 17:
return 2;
default:
return 1;
}
}
}
#if !defined(REALLY_CLANG)
namespace test_template_argument_deduction_for_class_templates
{
// TODO: test it with clang++ from git
template <typename T1, typename T2>
struct pair
{
pair (T1 p1, T2 p2)
: m1 {p1},
m2 {p2}
{}
T1 m1;
T2 m2;
};
void f()
{
[[maybe_unused]] auto p = pair{13, 42u};
}
}
#endif // !defined(REALLY_CLANG)
namespace test_non_type_auto_template_parameters
{
template <auto n>
struct B
{};
B<5> b1;
B<'a'> b2;
}
#if !defined(REALLY_CLANG)
namespace test_structured_bindings
{
// TODO: test it with clang++ from git
int arr[2] = { 1, 2 };
std::pair<int, int> pr = { 1, 2 };
auto f1() -> int(&)[2]
{
return arr;
}
auto f2() -> std::pair<int, int>&
{
return pr;
}
struct S
{
int x1 : 2;
volatile double y1;
};
S f3()
{
return {};
}
auto [ x1, y1 ] = f1();
auto& [ xr1, yr1 ] = f1();
auto [ x2, y2 ] = f2();
auto& [ xr2, yr2 ] = f2();
const auto [ x3, y3 ] = f3();
}
#endif // !defined(REALLY_CLANG)
#if !defined(REALLY_CLANG)
namespace test_exception_spec_type_system
{
// TODO: test it with clang++ from git
struct Good {};
struct Bad {};
void g1() noexcept;
void g2();
template<typename T>
Bad
f(T*, T*);
template<typename T1, typename T2>
Good
f(T1*, T2*);
static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
}
#endif // !defined(REALLY_CLANG)
namespace test_inline_variables
{
template<class T> void f(T)
{}
template<class T> inline T g(T)
{
return T{};
}
template<> inline void f<>(int)
{}
template<> int g<>(int)
{
return 5;
}
}
} // namespace cxx17
#endif // __cplusplus <= 201402L
]])


@ -1,39 +0,0 @@
# =============================================================================
# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx_11.html
# =============================================================================
#
# SYNOPSIS
#
# AX_CXX_COMPILE_STDCXX_11([ext|noext], [mandatory|optional])
#
# DESCRIPTION
#
# Check for baseline language coverage in the compiler for the C++11
# standard; if necessary, add switches to CXX and CXXCPP to enable
# support.
#
# This macro is a convenience alias for calling the AX_CXX_COMPILE_STDCXX
# macro with the version set to C++11. The two optional arguments are
# forwarded literally as the second and third argument respectively.
# Please see the documentation for the AX_CXX_COMPILE_STDCXX macro for
# more information. If you want to use this macro, you also need to
# download the ax_cxx_compile_stdcxx.m4 file.
#
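#   Example (illustrative; equivalent to calling AX_CXX_COMPILE_STDCXX with
#   the version fixed at 11):
#
#     AX_CXX_COMPILE_STDCXX_11([noext], [mandatory])
#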
# LICENSE
#
# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
# Copyright (c) 2015 Paul Norman <penorman@mac.com>
# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 18
AX_REQUIRE_DEFINED([AX_CXX_COMPILE_STDCXX])
AC_DEFUN([AX_CXX_COMPILE_STDCXX_11], [AX_CXX_COMPILE_STDCXX([11], [$1], [$2])])


@ -1,107 +0,0 @@
dnl @synopsis AX_DMD
dnl
dnl Test for the presence of a DMD-compatible D2 compiler, and (optionally)
dnl specified modules on the import path.
dnl
dnl If "DMD" is defined in the environment, that will be the only
dnl dmd command tested. Otherwise, a hard-coded list will be used.
dnl
dnl After AX_DMD runs, the shell variables "success" and "ax_dmd" are set to
dnl "yes" or "no", and "DMD" is set to the appropriate command. Furthermore,
dnl "dmd_optlink" will be set to "yes" or "no" depending on whether OPTLINK is
dnl used as the linker (DMD/Windows), and "dmd_of_dirsep" will be set to the
dnl directory separator to use when passing -of to DMD (OPTLINK requires a
dnl backslash).
dnl
dnl AX_CHECK_D_MODULE must be run after AX_DMD. It tests for the presence of a
dnl module in the import path of the chosen compiler, and sets the shell
dnl variable "success" to "yes" or "no".
dnl
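dnl Example (illustrative; std.json is just a stand-in module name):
dnl
dnl   AX_DMD
dnl   AX_CHECK_D_MODULE([std.json])
dnl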
dnl @category D
dnl @version 2011-05-31
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copyright (C) 2011 David Nadlinger
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
AC_DEFUN([AX_DMD],
[
dnl Hard-coded default commands to test.
DMD_PROGS="dmd,gdmd,ldmd"
dnl Allow the user to specify an alternative.
if test -n "$DMD" ; then
DMD_PROGS="$DMD"
fi
AC_MSG_CHECKING(for DMD)
# std.algorithm as a quick way to check for D2/Phobos.
echo "import std.algorithm; void main() {}" > configtest_ax_dmd.d
success=no
oIFS="$IFS"
IFS=","
for DMD in $DMD_PROGS ; do
IFS="$oIFS"
echo "Running \"$DMD configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
if $DMD configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
success=yes
break
fi
done
if test "$success" != "yes" ; then
AC_MSG_RESULT(no)
DMD=""
else
AC_MSG_RESULT(yes)
fi
ax_dmd="$success"
# Test whether OPTLINK is used by trying if DMD accepts -L/? without
# erroring out.
if test "$success" == "yes" ; then
AC_MSG_CHECKING(whether DMD uses OPTLINK)
echo "Running \”$DMD -L/? configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
if $DMD -L/? configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
AC_MSG_RESULT(yes)
dmd_optlink="yes"
# This actually produces double slashes in the final configure
# output, but at least it works.
dmd_of_dirsep="\\\\"
else
AC_MSG_RESULT(no)
dmd_optlink="no"
dmd_of_dirsep="/"
fi
fi
rm -f configtest_ax_dmd*
])
AC_DEFUN([AX_CHECK_D_MODULE],
[
AC_MSG_CHECKING(for D module [$1])
echo "import $1; void main() {}" > configtest_ax_dmd.d
echo "Running \"$DMD configtest_ax_dmd.d\"" >&AS_MESSAGE_LOG_FD
if $DMD -c configtest_ax_dmd.d >&AS_MESSAGE_LOG_FD 2>&1 ; then
AC_MSG_RESULT(yes)
success=yes
else
AC_MSG_RESULT(no)
success=no
fi
rm -f configtest_ax_dmd*
])


@ -1,129 +0,0 @@
dnl @synopsis AX_JAVAC_AND_JAVA
dnl @synopsis AX_CHECK_JAVA_CLASS(CLASSNAME)
dnl
dnl Test for the presence of a JDK, and (optionally) specific classes.
dnl
dnl If "JAVA" is defined in the environment, that will be the only
dnl java command tested. Otherwise, a hard-coded list will be used.
dnl Similarly for "JAVAC".
dnl
dnl AX_JAVAC_AND_JAVA does not currently support testing for a particular
dnl Java version, testing for only one of "java" and "javac", or
dnl compiling or running user-provided Java code.
dnl
dnl After AX_JAVAC_AND_JAVA runs, the shell variables "success" and
dnl "ax_javac_and_java" are set to "yes" or "no", and "JAVAC" and
dnl "JAVA" are set to the appropriate commands.
dnl
dnl AX_CHECK_JAVA_CLASS must be run after AX_JAVAC_AND_JAVA.
dnl It tests for the presence of a class based on a fully-qualified name.
dnl It sets the shell variable "success" to "yes" or "no".
dnl
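dnl Example (illustrative; the class name is just a stand-in):
dnl
dnl   AX_JAVAC_AND_JAVA
dnl   AX_CHECK_JAVA_CLASS([java.util.ArrayList])
dnl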
dnl @category Java
dnl @version 2009-02-09
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
AC_DEFUN([AX_JAVAC_AND_JAVA],
[
dnl Hard-coded default commands to test.
JAVAC_PROGS="javac,jikes,gcj -C"
JAVA_PROGS="java,kaffe"
dnl Allow the user to specify an alternative.
if test -n "$JAVAC" ; then
JAVAC_PROGS="$JAVAC"
fi
if test -n "$JAVA" ; then
JAVA_PROGS="$JAVA"
fi
AC_MSG_CHECKING(for javac and java)
echo "public class configtest_ax_javac_and_java { public static void main(String args@<:@@:>@) { } }" > configtest_ax_javac_and_java.java
success=no
oIFS="$IFS"
IFS=","
for JAVAC in $JAVAC_PROGS ; do
IFS="$oIFS"
echo "Running \"$JAVAC configtest_ax_javac_and_java.java\"" >&AS_MESSAGE_LOG_FD
if $JAVAC configtest_ax_javac_and_java.java >&AS_MESSAGE_LOG_FD 2>&1 ; then
# prevent $JAVA VM issues with UTF-8 path names (THRIFT-3271)
oLC_ALL="$LC_ALL"
LC_ALL=""
IFS=","
for JAVA in $JAVA_PROGS ; do
IFS="$oIFS"
echo "Running \"$JAVA configtest_ax_javac_and_java\"" >&AS_MESSAGE_LOG_FD
if $JAVA configtest_ax_javac_and_java >&AS_MESSAGE_LOG_FD 2>&1 ; then
success=yes
break 2
fi
done
# restore LC_ALL
LC_ALL="$oLC_ALL"
oLC_ALL=""
fi
done
rm -f configtest_ax_javac_and_java.java configtest_ax_javac_and_java.class
if test "$success" != "yes" ; then
AC_MSG_RESULT(no)
JAVAC=""
JAVA=""
else
AC_MSG_RESULT(yes)
fi
ax_javac_and_java="$success"
])
AC_DEFUN([AX_CHECK_JAVA_CLASS],
[
AC_MSG_CHECKING(for Java class [$1])
echo "import $1; public class configtest_ax_javac_and_java { public static void main(String args@<:@@:>@) { } }" > configtest_ax_javac_and_java.java
echo "Running \"$JAVAC configtest_ax_javac_and_java.java\"" >&AS_MESSAGE_LOG_FD
if $JAVAC configtest_ax_javac_and_java.java >&AS_MESSAGE_LOG_FD 2>&1 ; then
AC_MSG_RESULT(yes)
success=yes
else
AC_MSG_RESULT(no)
success=no
fi
rm -f configtest_ax_javac_and_java.java configtest_ax_javac_and_java.class
])
AC_DEFUN([AX_CHECK_ANT_VERSION],
[
AC_MSG_CHECKING(for ant version >= $2)
ANT_VALID=`expr "x$(printf "$2\n$($1 -version 2>/dev/null | sed -n 's/.*version \(@<:@0-9\.@:>@*\).*/\1/p')" | sort -t '.' -k 1,1 -k 2,2 -k 3,3 -g | sed -n 1p)" = "x$2"`
if test "x$ANT_VALID" = "x1" ; then
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
ANT=""
fi
])
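dnl For reference (illustrative): AX_CHECK_ANT_VERSION($ANT, 1.7) checks
dnl that the detected ant is at least version 1.7 and clears $ANT otherwise.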


@ -1,194 +0,0 @@
dnl @synopsis AX_LIB_EVENT([MINIMUM-VERSION])
dnl
dnl Test for the libevent library of a particular version (or newer).
dnl
dnl If no path to the installed libevent is given, the macro will first try
dnl using no -I or -L flags, then searches under /usr, /usr/local, /opt,
dnl and /opt/libevent.
dnl If these all fail, it will try the $LIBEVENT_ROOT environment variable.
dnl
dnl This macro requires that #include <sys/types.h> works and defines u_char.
dnl
dnl This macro calls:
dnl AC_SUBST(LIBEVENT_CPPFLAGS)
dnl AC_SUBST(LIBEVENT_LDFLAGS)
dnl AC_SUBST(LIBEVENT_LIBS)
dnl
dnl And (if libevent is found):
dnl AC_DEFINE(HAVE_LIBEVENT)
dnl
dnl It also leaves the shell variables "success" and "ax_have_libevent"
dnl set to "yes" or "no".
dnl
dnl NOTE: This macro does not currently work for cross-compiling,
dnl but it can be easily modified to allow it. (grep "cross").
dnl
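dnl Example (illustrative): require libevent 1.4 or newer, then branch on
dnl the shell variable the macro leaves behind:
dnl
dnl   AX_LIB_EVENT([1.4])
dnl   AS_IF([test "$ax_have_libevent" = "yes"], [use_libevent=yes])
dnl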
dnl @category InstalledPackages
dnl @category C
dnl @version 2007-09-12
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
dnl Input: ax_libevent_path, WANT_LIBEVENT_VERSION
dnl Output: success=yes/no
AC_DEFUN([AX_LIB_EVENT_DO_CHECK],
[
# Save our flags.
CPPFLAGS_SAVED="$CPPFLAGS"
LDFLAGS_SAVED="$LDFLAGS"
LIBS_SAVED="$LIBS"
LD_LIBRARY_PATH_SAVED="$LD_LIBRARY_PATH"
# Set our flags if we are checking a specific directory.
if test -n "$ax_libevent_path" ; then
LIBEVENT_CPPFLAGS="-I$ax_libevent_path/include"
LIBEVENT_LDFLAGS="-L$ax_libevent_path/lib"
LD_LIBRARY_PATH="$ax_libevent_path/lib:$LD_LIBRARY_PATH"
else
LIBEVENT_CPPFLAGS=""
LIBEVENT_LDFLAGS=""
fi
# Required flag for libevent.
LIBEVENT_LIBS="-levent"
# Prepare the environment for compilation.
CPPFLAGS="$CPPFLAGS $LIBEVENT_CPPFLAGS"
LDFLAGS="$LDFLAGS $LIBEVENT_LDFLAGS"
LIBS="$LIBS $LIBEVENT_LIBS"
export CPPFLAGS
export LDFLAGS
export LIBS
export LD_LIBRARY_PATH
success=no
# Compile, link, and run the program. This checks:
# - event.h is available for including.
# - event_get_version() is available for linking.
# - The event version string is lexicographically greater
# than the required version.
AC_LANG_PUSH([C])
dnl This can be changed to AC_LINK_IFELSE if you are cross-compiling,
dnl but then the version cannot be checked.
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <sys/types.h>
#include <event.h>
]], [[
const char* lib_version = event_get_version();
const char* wnt_version = "$WANT_LIBEVENT_VERSION";
int lib_digits;
int wnt_digits;
for (;;) {
/* If we reached the end of the want version, we have it. */
if (*wnt_version == '\0' || *wnt_version == '-') {
return 0;
}
/* If the want version continues but the lib version does not, */
/* we are missing a letter. We don't have it. */
if (*lib_version == '\0' || *lib_version == '-') {
return 1;
}
/* In the 1.4 version numbering style, if there are more digits */
/* in one version than the other, that one is higher. */
for (lib_digits = 0;
lib_version[lib_digits] >= '0' &&
lib_version[lib_digits] <= '9';
lib_digits++)
;
for (wnt_digits = 0;
wnt_version[wnt_digits] >= '0' &&
wnt_version[wnt_digits] <= '9';
wnt_digits++)
;
if (lib_digits > wnt_digits) {
return 0;
}
if (lib_digits < wnt_digits) {
return 1;
}
/* If we have greater than what we want, we have it. */
if (*lib_version > *wnt_version) {
return 0;
}
/* If we have less, we don't. */
if (*lib_version < *wnt_version) {
return 1;
}
lib_version++;
wnt_version++;
}
return 0;
]])], [
success=yes
])
AC_LANG_POP([C])
# Restore flags.
CPPFLAGS="$CPPFLAGS_SAVED"
LDFLAGS="$LDFLAGS_SAVED"
LIBS="$LIBS_SAVED"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH_SAVED"
])
AC_DEFUN([AX_LIB_EVENT],
[
dnl Allow search path to be overridden on the command line.
AC_ARG_WITH([libevent],
AS_HELP_STRING([--with-libevent@<:@=DIR@:>@], [use libevent [default=yes]. Optionally specify the root prefix dir where libevent is installed]),
[
if test "x$withval" = "xno"; then
want_libevent="no"
elif test "x$withval" = "xyes"; then
want_libevent="yes"
ax_libevent_path=""
else
want_libevent="yes"
ax_libevent_path="$withval"
fi
],
[ want_libevent="yes" ; ax_libevent_path="" ])
if test "$want_libevent" = "yes"; then
WANT_LIBEVENT_VERSION=ifelse([$1], ,1.2,$1)
AC_MSG_CHECKING(for libevent >= $WANT_LIBEVENT_VERSION)
# Run tests.
if test -n "$ax_libevent_path"; then
AX_LIB_EVENT_DO_CHECK
else
for ax_libevent_path in "" $lt_sysroot/usr $lt_sysroot/usr/local $lt_sysroot/opt $lt_sysroot/opt/local $lt_sysroot/opt/libevent "$LIBEVENT_ROOT" ; do
AX_LIB_EVENT_DO_CHECK
if test "$success" = "yes"; then
break;
fi
done
fi
if test "$success" != "yes" ; then
AC_MSG_RESULT(no)
LIBEVENT_CPPFLAGS=""
LIBEVENT_LDFLAGS=""
LIBEVENT_LIBS=""
else
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_LIBEVENT,,[define if libevent is available])
ax_have_libevent_[]m4_translit([$1], [.], [_])="yes"
fi
ax_have_libevent="$success"
AC_SUBST(LIBEVENT_CPPFLAGS)
AC_SUBST(LIBEVENT_LDFLAGS)
AC_SUBST(LIBEVENT_LIBS)
fi
])


@ -1,173 +0,0 @@
dnl @synopsis AX_LIB_ZLIB([MINIMUM-VERSION])
dnl
dnl Test for the libz library of a particular version (or newer).
dnl
dnl If no path to the installed zlib is given, the macro will first try
dnl using no -I or -L flags, then search under /usr, /usr/local, /opt,
dnl and /opt/zlib.
dnl If these all fail, it will try the $ZLIB_ROOT environment variable.
dnl
dnl This macro calls:
dnl AC_SUBST(ZLIB_CPPFLAGS)
dnl AC_SUBST(ZLIB_LDFLAGS)
dnl AC_SUBST(ZLIB_LIBS)
dnl
dnl And (if zlib is found):
dnl AC_DEFINE(HAVE_ZLIB)
dnl
dnl It also leaves the shell variables "success" and "ax_have_zlib"
dnl set to "yes" or "no".
dnl
dnl NOTE: This macro does not currently work for cross-compiling,
dnl but it can be easily modified to allow it. (grep "cross").
dnl
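dnl Example (illustrative): require zlib 1.2.3 or newer, then branch on
dnl the shell variable the macro leaves behind:
dnl
dnl   AX_LIB_ZLIB([1.2.3])
dnl   AS_IF([test "$ax_have_zlib" = "yes"], [use_zlib=yes])
dnl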
dnl @category InstalledPackages
dnl @category C
dnl @version 2007-09-12
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
dnl Input: ax_zlib_path, WANT_ZLIB_VERSION
dnl Output: success=yes/no
AC_DEFUN([AX_LIB_ZLIB_DO_CHECK],
[
# Save our flags.
CPPFLAGS_SAVED="$CPPFLAGS"
LDFLAGS_SAVED="$LDFLAGS"
LIBS_SAVED="$LIBS"
LD_LIBRARY_PATH_SAVED="$LD_LIBRARY_PATH"
# Set our flags if we are checking a specific directory.
if test -n "$ax_zlib_path" ; then
ZLIB_CPPFLAGS="-I$ax_zlib_path/include"
ZLIB_LDFLAGS="-L$ax_zlib_path/lib"
LD_LIBRARY_PATH="$ax_zlib_path/lib:$LD_LIBRARY_PATH"
else
ZLIB_CPPFLAGS=""
ZLIB_LDFLAGS=""
fi
# Required flag for zlib.
ZLIB_LIBS="-lz"
# Prepare the environment for compilation.
CPPFLAGS="$CPPFLAGS $ZLIB_CPPFLAGS"
LDFLAGS="$LDFLAGS $ZLIB_LDFLAGS"
LIBS="$LIBS $ZLIB_LIBS"
export CPPFLAGS
export LDFLAGS
export LIBS
export LD_LIBRARY_PATH
success=no
# Compile, link, and run the program. This checks:
# - zlib.h is available for including.
# - zlibVersion() is available for linking.
# - ZLIB_VERNUM is greater than or equal to the desired version.
# - ZLIB_VERSION (defined in zlib.h) matches zlibVersion()
# (defined in the library).
AC_LANG_PUSH([C])
dnl This can be changed to AC_LINK_IFELSE if you are cross-compiling.
AC_LINK_IFELSE([AC_LANG_PROGRAM([[
#include <zlib.h>
#if ZLIB_VERNUM >= 0x$WANT_ZLIB_VERSION
#else
# error zlib is too old
#endif
]], [[
const char* lib_version = zlibVersion();
const char* hdr_version = ZLIB_VERSION;
for (;;) {
if (*lib_version != *hdr_version) {
/* If this happens, your zlib header doesn't match your zlib */
/* library. That is really bad. */
return 1;
}
if (*lib_version == '\0') {
break;
}
lib_version++;
hdr_version++;
}
return 0;
]])], [
success=yes
])
AC_LANG_POP([C])
# Restore flags.
CPPFLAGS="$CPPFLAGS_SAVED"
LDFLAGS="$LDFLAGS_SAVED"
LIBS="$LIBS_SAVED"
LD_LIBRARY_PATH="$LD_LIBRARY_PATH_SAVED"
])
AC_DEFUN([AX_LIB_ZLIB],
[
dnl Allow search path to be overridden on the command line.
AC_ARG_WITH([zlib],
AS_HELP_STRING([--with-zlib@<:@=DIR@:>@], [use zlib (default is yes) - it is possible to specify an alternate root directory for zlib]),
[
if test "x$withval" = "xno"; then
want_zlib="no"
elif test "x$withval" = "xyes"; then
want_zlib="yes"
ax_zlib_path=""
else
want_zlib="yes"
ax_zlib_path="$withval"
fi
],
[want_zlib="yes" ; ax_zlib_path="" ])
if test "$want_zlib" = "yes"; then
# Parse out the version.
zlib_version_req=ifelse([$1], ,1.2.3,$1)
zlib_version_req_major=`expr $zlib_version_req : '\([[0-9]]*\)'`
zlib_version_req_minor=`expr $zlib_version_req : '[[0-9]]*\.\([[0-9]]*\)'`
zlib_version_req_patch=`expr $zlib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
if test -z "$zlib_version_req_patch" ; then
zlib_version_req_patch="0"
fi
WANT_ZLIB_VERSION=`expr $zlib_version_req_major \* 1000 \+ $zlib_version_req_minor \* 100 \+ $zlib_version_req_patch \* 10`
AC_MSG_CHECKING(for zlib >= $zlib_version_req)
# Run tests.
if test -n "$ax_zlib_path"; then
AX_LIB_ZLIB_DO_CHECK
else
for ax_zlib_path in "" /usr /usr/local /opt /opt/zlib "$ZLIB_ROOT" ; do
AX_LIB_ZLIB_DO_CHECK
if test "$success" = "yes"; then
break;
fi
done
fi
if test "$success" != "yes" ; then
AC_MSG_RESULT(no)
ZLIB_CPPFLAGS=""
ZLIB_LDFLAGS=""
ZLIB_LIBS=""
else
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZLIB,,[define if zlib is available])
fi
ax_have_zlib="$success"
AC_SUBST(ZLIB_CPPFLAGS)
AC_SUBST(ZLIB_LDFLAGS)
AC_SUBST(ZLIB_LIBS)
fi
])


@ -1,664 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_lua.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_PROG_LUA[([MINIMUM-VERSION], [TOO-BIG-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
# AX_LUA_HEADERS[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
# AX_LUA_LIBS[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
# AX_LUA_READLINE[([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])]
#
# DESCRIPTION
#
# Detect a Lua interpreter, optionally specifying a minimum and maximum
# version number. Set up important Lua paths, such as the directories in
# which to install scripts and modules (shared libraries).
#
# Also detect Lua headers and libraries. The Lua version contained in the
# header is checked to match the Lua interpreter version exactly. When
# searching for Lua libraries, the version number is used as a suffix.
# This is done with the goal of supporting multiple Lua installs (5.1,
# 5.2, and 5.3 side-by-side).
#
# A note on compatibility with previous versions: This file has been
# mostly rewritten for serial 18. Most developers should be able to use
# these macros without needing to modify configure.ac. Care has been taken
# to preserve each macro's behavior, but there are some differences:
#
# 1) AX_WITH_LUA is deprecated; it now expands to the exact same thing as
# AX_PROG_LUA with no arguments.
#
# 2) AX_LUA_HEADERS now checks that the version number defined in lua.h
# matches the interpreter version. AX_LUA_HEADERS_VERSION is therefore
# unnecessary, so it is deprecated and does not expand to anything.
#
# 3) The configure flag --with-lua-suffix no longer exists; the user
# should instead specify the LUA precious variable on the command line.
# See the AX_PROG_LUA description for details.
#
# Please read the macro descriptions below for more information.
#
# This file was inspired by Andrew Dalke's and James Henstridge's
# python.m4 and Tom Payne's, Matthieu Moy's, and Reuben Thomas's ax_lua.m4
# (serial 17). Basically, this file is a mash-up of those two files. I
# like to think it combines the best of the two!
#
# AX_PROG_LUA: Search for the Lua interpreter, and set up important Lua
# paths. Adds precious variable LUA, which may contain the path of the Lua
# interpreter. If LUA is blank, the user's path is searched for a
# suitable interpreter.
#
# If MINIMUM-VERSION is supplied, then only Lua interpreters with a
# version number greater or equal to MINIMUM-VERSION will be accepted. If
# TOO-BIG-VERSION is also supplied, then only Lua interpreters with a
# version number greater or equal to MINIMUM-VERSION and less than
# TOO-BIG-VERSION will be accepted.
#
# The Lua version number, LUA_VERSION, is found from the interpreter, and
# substituted. LUA_PLATFORM is also found, but not currently supported (no
# standard representation).
#
# Finally, the macro finds four paths:
#
# luadir Directory to install Lua scripts.
# pkgluadir $luadir/$PACKAGE
# luaexecdir Directory to install Lua modules.
# pkgluaexecdir $luaexecdir/$PACKAGE
#
# These paths are found based on $prefix, $exec_prefix, Lua's
# package.path, and package.cpath. The first path of package.path
# beginning with $prefix is selected as luadir. The first path of
# package.cpath beginning with $exec_prefix is used as luaexecdir. This
# should work on all reasonable Lua installations. If a path cannot be
# determined, a default path is used. Of course, the user can override
# these later when invoking make.
#
# luadir Default: $prefix/share/lua/$LUA_VERSION
# luaexecdir Default: $exec_prefix/lib/lua/$LUA_VERSION
#
# These directories can be used by Automake as install destinations. The
# variable name minus 'dir' needs to be used as a prefix to the
# appropriate Automake primary, e.g. lua_SCRIPTS or luaexec_LIBRARIES.
#
# If an acceptable Lua interpreter is found, then ACTION-IF-FOUND is
# performed, otherwise ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-
# FOUND is blank, then it will default to printing an error. To prevent
# the default behavior, give ':' as an action.
#
# AX_LUA_HEADERS: Search for Lua headers. Requires that AX_PROG_LUA be
# expanded before this macro. Adds precious variable LUA_INCLUDE, which
# may contain Lua specific include flags, e.g. -I/usr/include/lua5.1. If
# LUA_INCLUDE is blank, then this macro will attempt to find suitable
# flags.
#
# LUA_INCLUDE can be used by Automake to compile Lua modules or
# executables with embedded interpreters. The *_CPPFLAGS variables should
# be used for this purpose, e.g. myprog_CPPFLAGS = $(LUA_INCLUDE).
#
# This macro searches for the header lua.h (and others). The search is
# performed with a combination of CPPFLAGS, CPATH, etc, and LUA_INCLUDE.
# If the search is unsuccessful, then some common directories are tried.
# If the headers are then found, then LUA_INCLUDE is set accordingly.
#
# The paths automatically searched are:
#
# * /usr/include/luaX.Y
# * /usr/include/lua/X.Y
# * /usr/include/luaXY
# * /usr/local/include/luaX.Y
# * /usr/local/include/lua-X.Y
# * /usr/local/include/lua/X.Y
# * /usr/local/include/luaXY
#
# (Where X.Y is the Lua version number, e.g. 5.1.)
#
# The Lua version number found in the headers is always checked to match
# the Lua interpreter's version number. Lua headers with mismatched
# version numbers are not accepted.
#
# If headers are found, then ACTION-IF-FOUND is performed, otherwise
# ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-FOUND is blank, then
# it will default to printing an error. To prevent the default behavior,
# set the action to ':'.
#
# AX_LUA_LIBS: Search for Lua libraries. Requires that AX_PROG_LUA be
# expanded before this macro. Adds precious variable LUA_LIB, which may
# contain Lua specific linker flags, e.g. -llua5.1. If LUA_LIB is blank,
# then this macro will attempt to find suitable flags.
#
# LUA_LIB can be used by Automake to link Lua modules or executables with
# embedded interpreters. The *_LIBADD and *_LDADD variables should be used
# for this purpose, e.g. mymod_LIBADD = $(LUA_LIB).
#
# This macro searches for the Lua library. More technically, it searches
# for a library containing the function lua_load. The search is performed
# with a combination of LIBS, LIBRARY_PATH, and LUA_LIB.
#
# If the search determines that some linker flags are missing, then those
# flags will be added to LUA_LIB.
#
# If libraries are found, then ACTION-IF-FOUND is performed, otherwise
# ACTION-IF-NOT-FOUND is performed. If ACTION-IF-NOT-FOUND is blank, then
# it will default to printing an error. To prevent the default behavior,
# set the action to ':'.
#
# AX_LUA_READLINE: Search for readline headers and libraries. Requires the
# AX_LIB_READLINE macro, which is provided by ax_lib_readline.m4 from the
# Autoconf Archive.
#
# If a readline compatible library is found, then ACTION-IF-FOUND is
# performed, otherwise ACTION-IF-NOT-FOUND is performed.
#
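# A combined configure.ac sketch (illustrative; the version bounds are
# placeholders):
#
#   AX_PROG_LUA([5.1], [5.4])
#   AX_LUA_HEADERS
#   AX_LUA_LIBS
#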
# LICENSE
#
# Copyright (c) 2015 Reuben Thomas <rrt@sc3d.org>
# Copyright (c) 2014 Tim Perkins <tprk77@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 40
dnl =========================================================================
dnl AX_PROG_LUA([MINIMUM-VERSION], [TOO-BIG-VERSION],
dnl [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl =========================================================================
AC_DEFUN([AX_PROG_LUA],
[
dnl Check for required tools.
AC_REQUIRE([AC_PROG_GREP])
AC_REQUIRE([AC_PROG_SED])
dnl Make LUA a precious variable.
AC_ARG_VAR([LUA], [The Lua interpreter, e.g. /usr/bin/lua5.1])
dnl Find a Lua interpreter.
m4_define_default([_AX_LUA_INTERPRETER_LIST],
[lua lua5.3 lua53 lua5.2 lua52 lua5.1 lua51 lua50])
m4_if([$1], [],
[ dnl No version check is needed. Find any Lua interpreter.
AS_IF([test "x$LUA" = 'x'],
[AC_PATH_PROGS([LUA], [_AX_LUA_INTERPRETER_LIST], [:])])
ax_display_LUA='lua'
AS_IF([test "x$LUA" != 'x:'],
[ dnl At least check if this is a Lua interpreter.
AC_MSG_CHECKING([if $LUA is a Lua interpreter])
_AX_LUA_CHK_IS_INTRP([$LUA],
[AC_MSG_RESULT([yes])],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([not a Lua interpreter])
])
])
],
[ dnl A version check is needed.
AS_IF([test "x$LUA" != 'x'],
[ dnl Check if this is a Lua interpreter.
AC_MSG_CHECKING([if $LUA is a Lua interpreter])
_AX_LUA_CHK_IS_INTRP([$LUA],
[AC_MSG_RESULT([yes])],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([not a Lua interpreter])
])
dnl Check the version.
m4_if([$2], [],
[_ax_check_text="whether $LUA version >= $1"],
[_ax_check_text="whether $LUA version >= $1, < $2"])
AC_MSG_CHECKING([$_ax_check_text])
_AX_LUA_CHK_VER([$LUA], [$1], [$2],
[AC_MSG_RESULT([yes])],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([version is out of range for specified LUA])])
ax_display_LUA=$LUA
],
[ dnl Try each interpreter until we find one that satisfies VERSION.
m4_if([$2], [],
[_ax_check_text="for a Lua interpreter with version >= $1"],
[_ax_check_text="for a Lua interpreter with version >= $1, < $2"])
AC_CACHE_CHECK([$_ax_check_text],
[ax_cv_pathless_LUA],
[ for ax_cv_pathless_LUA in _AX_LUA_INTERPRETER_LIST none; do
test "x$ax_cv_pathless_LUA" = 'xnone' && break
_AX_LUA_CHK_IS_INTRP([$ax_cv_pathless_LUA], [], [continue])
_AX_LUA_CHK_VER([$ax_cv_pathless_LUA], [$1], [$2], [break])
done
])
dnl Set $LUA to the absolute path of $ax_cv_pathless_LUA.
AS_IF([test "x$ax_cv_pathless_LUA" = 'xnone'],
[LUA=':'],
[AC_PATH_PROG([LUA], [$ax_cv_pathless_LUA])])
ax_display_LUA=$ax_cv_pathless_LUA
])
])
AS_IF([test "x$LUA" = 'x:'],
[ dnl Run any user-specified action, or abort.
m4_default([$4], [AC_MSG_ERROR([cannot find suitable Lua interpreter])])
],
[ dnl Query Lua for its version number.
AC_CACHE_CHECK([for $ax_display_LUA version],
[ax_cv_lua_version],
[ dnl Get the interpreter version in X.Y format. This should work for
dnl interpreters version 5.0 and beyond.
ax_cv_lua_version=[`$LUA -e '
-- return a version number in X.Y format
local _, _, ver = string.find(_VERSION, "^Lua (%d+%.%d+)")
print(ver)'`]
])
AS_IF([test "x$ax_cv_lua_version" = 'x'],
[AC_MSG_ERROR([invalid Lua version number])])
AC_SUBST([LUA_VERSION], [$ax_cv_lua_version])
AC_SUBST([LUA_SHORT_VERSION], [`echo "$LUA_VERSION" | $SED 's|\.||'`])
dnl The following check is not supported:
dnl At times (like when building shared libraries) you may want to know
dnl which OS platform Lua thinks this is.
AC_CACHE_CHECK([for $ax_display_LUA platform],
[ax_cv_lua_platform],
[ax_cv_lua_platform=[`$LUA -e 'print("unknown")'`]])
AC_SUBST([LUA_PLATFORM], [$ax_cv_lua_platform])
dnl Use the values of $prefix and $exec_prefix for the corresponding
dnl values of LUA_PREFIX and LUA_EXEC_PREFIX. These are made distinct
dnl variables so they can be overridden if need be. However, the general
dnl consensus is that you shouldn't need this ability.
AC_SUBST([LUA_PREFIX], ['${prefix}'])
AC_SUBST([LUA_EXEC_PREFIX], ['${exec_prefix}'])
dnl Lua provides no way to query the script directory, and instead
dnl provides LUA_PATH. However, we should be able to make a safe educated
dnl guess. If the built-in search path contains a directory which is
dnl prefixed by $prefix, then we can store scripts there. The first
dnl matching path will be used.
AC_CACHE_CHECK([for $ax_display_LUA script directory],
[ax_cv_lua_luadir],
[ AS_IF([test "x$prefix" = 'xNONE'],
[ax_lua_prefix=$ac_default_prefix],
[ax_lua_prefix=$prefix])
dnl Initialize to the default path.
ax_cv_lua_luadir="$LUA_PREFIX/share/lua/$LUA_VERSION"
dnl Try to find a path with the prefix.
_AX_LUA_FND_PRFX_PTH([$LUA], [$ax_lua_prefix], [script])
AS_IF([test "x$ax_lua_prefixed_path" != 'x'],
[ dnl Fix the prefix.
_ax_strip_prefix=`echo "$ax_lua_prefix" | $SED 's|.|.|g'`
ax_cv_lua_luadir=`echo "$ax_lua_prefixed_path" | \
$SED "s|^$_ax_strip_prefix|$LUA_PREFIX|"`
])
])
AC_SUBST([luadir], [$ax_cv_lua_luadir])
AC_SUBST([pkgluadir], [\${luadir}/$PACKAGE])
dnl Lua provides no way to query the module directory, and instead
dnl provides LUA_PATH. However, we should be able to make a safe educated
dnl guess. If the built-in search path contains a directory which is
dnl prefixed by $exec_prefix, then we can store modules there. The first
dnl matching path will be used.
AC_CACHE_CHECK([for $ax_display_LUA module directory],
[ax_cv_lua_luaexecdir],
[ AS_IF([test "x$exec_prefix" = 'xNONE'],
[ax_lua_exec_prefix=$ax_lua_prefix],
[ax_lua_exec_prefix=$exec_prefix])
dnl Initialize to the default path.
ax_cv_lua_luaexecdir="$LUA_EXEC_PREFIX/lib/lua/$LUA_VERSION"
dnl Try to find a path with the prefix.
_AX_LUA_FND_PRFX_PTH([$LUA],
[$ax_lua_exec_prefix], [module])
AS_IF([test "x$ax_lua_prefixed_path" != 'x'],
[ dnl Fix the prefix.
_ax_strip_prefix=`echo "$ax_lua_exec_prefix" | $SED 's|.|.|g'`
ax_cv_lua_luaexecdir=`echo "$ax_lua_prefixed_path" | \
$SED "s|^$_ax_strip_prefix|$LUA_EXEC_PREFIX|"`
])
])
AC_SUBST([luaexecdir], [$ax_cv_lua_luaexecdir])
AC_SUBST([pkgluaexecdir], [\${luaexecdir}/$PACKAGE])
dnl Run any user specified action.
$3
])
])
dnl AX_WITH_LUA is now the same thing as AX_PROG_LUA.
AC_DEFUN([AX_WITH_LUA],
[
AC_MSG_WARN([[$0 is deprecated, please use AX_PROG_LUA instead]])
AX_PROG_LUA
])
dnl =========================================================================
dnl _AX_LUA_CHK_IS_INTRP(PROG, [ACTION-IF-TRUE], [ACTION-IF-FALSE])
dnl =========================================================================
AC_DEFUN([_AX_LUA_CHK_IS_INTRP],
[
dnl A minimal Lua factorial to prove this is an interpreter. This should work
dnl for Lua interpreters version 5.0 and beyond.
_ax_lua_factorial=[`$1 2>/dev/null -e '
-- a simple factorial
function fact (n)
if n == 0 then
return 1
else
return n * fact(n-1)
end
end
print("fact(5) is " .. fact(5))'`]
AS_IF([test "$_ax_lua_factorial" = 'fact(5) is 120'],
[$2], [$3])
])
dnl =========================================================================
dnl _AX_LUA_CHK_VER(PROG, MINIMUM-VERSION, [TOO-BIG-VERSION],
dnl [ACTION-IF-TRUE], [ACTION-IF-FALSE])
dnl =========================================================================
AC_DEFUN([_AX_LUA_CHK_VER],
[
dnl Check that the Lua version is within the bounds. Only the major and minor
dnl version numbers are considered. This should work for Lua interpreters
dnl version 5.0 and beyond.
_ax_lua_good_version=[`$1 -e '
-- a script to compare versions
function verstr2num(verstr)
local _, _, majorver, minorver = string.find(verstr, "^(%d+)%.(%d+)")
if majorver and minorver then
return tonumber(majorver) * 100 + tonumber(minorver)
end
end
local minver = verstr2num("$2")
local _, _, trimver = string.find(_VERSION, "^Lua (.*)")
local ver = verstr2num(trimver)
local maxver = verstr2num("$3") or 1e9
if minver <= ver and ver < maxver then
print("yes")
else
print("no")
end'`]
AS_IF([test "x$_ax_lua_good_version" = "xyes"],
[$4], [$5])
])
dnl =========================================================================
dnl _AX_LUA_FND_PRFX_PTH(PROG, PREFIX, SCRIPT-OR-MODULE-DIR)
dnl =========================================================================
AC_DEFUN([_AX_LUA_FND_PRFX_PTH],
[
dnl Get the script or module directory by querying the Lua interpreter,
dnl filtering on the given prefix, and selecting the shallowest path. If no
dnl path is found matching the prefix, the result will be an empty string.
dnl The third argument determines the type of search, it can be 'script' or
dnl 'module'. Supplying 'script' will perform the search with package.path
dnl and LUA_PATH, and supplying 'module' will search with package.cpath and
dnl LUA_CPATH. This is done for compatibility with Lua 5.0.
ax_lua_prefixed_path=[`$1 -e '
-- get the path based on search type
local searchtype = "$3"
local paths = ""
if searchtype == "script" then
paths = (package and package.path) or LUA_PATH
elseif searchtype == "module" then
paths = (package and package.cpath) or LUA_CPATH
end
-- search for the prefix
local prefix = "'$2'"
local minpath = ""
local mindepth = 1e9
string.gsub(paths, "(@<:@^;@:>@+)",
function (path)
path = string.gsub(path, "%?.*$", "")
path = string.gsub(path, "/@<:@^/@:>@*$", "")
if string.find(path, prefix) then
local depth = string.len(string.gsub(path, "@<:@^/@:>@", ""))
if depth < mindepth then
minpath = path
mindepth = depth
end
end
end)
print(minpath)'`]
])
dnl =========================================================================
dnl AX_LUA_HEADERS([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl =========================================================================
AC_DEFUN([AX_LUA_HEADERS],
[
dnl Check for LUA_VERSION.
AC_MSG_CHECKING([if LUA_VERSION is defined])
AS_IF([test "x$LUA_VERSION" != 'x'],
[AC_MSG_RESULT([yes])],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([cannot check Lua headers without knowing LUA_VERSION])
])
dnl Make LUA_INCLUDE a precious variable.
AC_ARG_VAR([LUA_INCLUDE], [The Lua includes, e.g. -I/usr/include/lua5.1])
dnl Some default directories to search.
LUA_SHORT_VERSION=`echo "$LUA_VERSION" | $SED 's|\.||'`
m4_define_default([_AX_LUA_INCLUDE_LIST],
[ /usr/include/lua$LUA_VERSION \
/usr/include/lua-$LUA_VERSION \
/usr/include/lua/$LUA_VERSION \
/usr/include/lua$LUA_SHORT_VERSION \
/usr/local/include/lua$LUA_VERSION \
/usr/local/include/lua-$LUA_VERSION \
/usr/local/include/lua/$LUA_VERSION \
/usr/local/include/lua$LUA_SHORT_VERSION \
])
dnl Try to find the headers.
_ax_lua_saved_cppflags=$CPPFLAGS
CPPFLAGS="$CPPFLAGS $LUA_INCLUDE"
AC_CHECK_HEADERS([lua.h lualib.h lauxlib.h luaconf.h])
CPPFLAGS=$_ax_lua_saved_cppflags
dnl Try some other directories if LUA_INCLUDE was not set.
AS_IF([test "x$LUA_INCLUDE" = 'x' &&
test "x$ac_cv_header_lua_h" != 'xyes'],
[ dnl Try some common include paths.
for _ax_include_path in _AX_LUA_INCLUDE_LIST; do
test ! -d "$_ax_include_path" && continue
AC_MSG_CHECKING([for Lua headers in])
AC_MSG_RESULT([$_ax_include_path])
AS_UNSET([ac_cv_header_lua_h])
AS_UNSET([ac_cv_header_lualib_h])
AS_UNSET([ac_cv_header_lauxlib_h])
AS_UNSET([ac_cv_header_luaconf_h])
_ax_lua_saved_cppflags=$CPPFLAGS
CPPFLAGS="$CPPFLAGS -I$_ax_include_path"
AC_CHECK_HEADERS([lua.h lualib.h lauxlib.h luaconf.h])
CPPFLAGS=$_ax_lua_saved_cppflags
AS_IF([test "x$ac_cv_header_lua_h" = 'xyes'],
[ LUA_INCLUDE="-I$_ax_include_path"
break
])
done
])
AS_IF([test "x$ac_cv_header_lua_h" = 'xyes'],
[ dnl Make a program to print LUA_VERSION defined in the header.
dnl TODO It would be really nice if we could do this without compiling a
dnl program, then it would work when cross compiling. But I'm not sure how
dnl to do this reliably. For now, assume versions match when cross compiling.
AS_IF([test "x$cross_compiling" != 'xyes'],
[ AC_CACHE_CHECK([for Lua header version],
[ax_cv_lua_header_version],
[ _ax_lua_saved_cppflags=$CPPFLAGS
CPPFLAGS="$CPPFLAGS $LUA_INCLUDE"
AC_RUN_IFELSE(
[ AC_LANG_SOURCE([[
#include <lua.h>
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char ** argv)
{
if(argc > 1) printf("%s", LUA_VERSION);
exit(EXIT_SUCCESS);
}
]])
],
[ ax_cv_lua_header_version=`./conftest$EXEEXT p | \
$SED -n "s|^Lua \(@<:@0-9@:>@\{1,\}\.@<:@0-9@:>@\{1,\}\).\{0,\}|\1|p"`
],
[ax_cv_lua_header_version='unknown'])
CPPFLAGS=$_ax_lua_saved_cppflags
])
dnl Compare this to the previously found LUA_VERSION.
AC_MSG_CHECKING([if Lua header version matches $LUA_VERSION])
AS_IF([test "x$ax_cv_lua_header_version" = "x$LUA_VERSION"],
[ AC_MSG_RESULT([yes])
ax_header_version_match='yes'
],
[ AC_MSG_RESULT([no])
ax_header_version_match='no'
])
],
[ AC_MSG_WARN([cross compiling so assuming header version number matches])
ax_header_version_match='yes'
])
])
dnl Was LUA_INCLUDE specified?
AS_IF([test "x$ax_header_version_match" != 'xyes' &&
test "x$LUA_INCLUDE" != 'x'],
[AC_MSG_ERROR([cannot find headers for specified LUA_INCLUDE])])
dnl Test the final result and run user code.
AS_IF([test "x$ax_header_version_match" = 'xyes'], [$1],
[m4_default([$2], [AC_MSG_ERROR([cannot find Lua includes])])])
])
dnl AX_LUA_HEADERS_VERSION no longer exists, use AX_LUA_HEADERS.
AC_DEFUN([AX_LUA_HEADERS_VERSION],
[
AC_MSG_WARN([[$0 is deprecated, please use AX_LUA_HEADERS instead]])
])
dnl =========================================================================
dnl AX_LUA_LIBS([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl =========================================================================
AC_DEFUN([AX_LUA_LIBS],
[
dnl TODO Should this macro also check various -L flags?
dnl Check for LUA_VERSION.
AC_MSG_CHECKING([if LUA_VERSION is defined])
AS_IF([test "x$LUA_VERSION" != 'x'],
[AC_MSG_RESULT([yes])],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([cannot check Lua libs without knowing LUA_VERSION])
])
dnl Make LUA_LIB a precious variable.
AC_ARG_VAR([LUA_LIB], [The Lua library, e.g. -llua5.1])
AS_IF([test "x$LUA_LIB" != 'x'],
[ dnl Check that LUA_LIBS works.
_ax_lua_saved_libs=$LIBS
LIBS="$LIBS $LUA_LIB"
AC_SEARCH_LIBS([lua_load], [],
[_ax_found_lua_libs='yes'],
[_ax_found_lua_libs='no'])
LIBS=$_ax_lua_saved_libs
dnl Check the result.
AS_IF([test "x$_ax_found_lua_libs" != 'xyes'],
[AC_MSG_ERROR([cannot find libs for specified LUA_LIB])])
],
[ dnl First search for extra libs.
_ax_lua_extra_libs=''
_ax_lua_saved_libs=$LIBS
LIBS="$LIBS $LUA_LIB"
AC_SEARCH_LIBS([exp], [m])
AC_SEARCH_LIBS([dlopen], [dl])
LIBS=$_ax_lua_saved_libs
AS_IF([test "x$ac_cv_search_exp" != 'xno' &&
test "x$ac_cv_search_exp" != 'xnone required'],
[_ax_lua_extra_libs="$_ax_lua_extra_libs $ac_cv_search_exp"])
AS_IF([test "x$ac_cv_search_dlopen" != 'xno' &&
test "x$ac_cv_search_dlopen" != 'xnone required'],
[_ax_lua_extra_libs="$_ax_lua_extra_libs $ac_cv_search_dlopen"])
dnl Try to find the Lua libs.
_ax_lua_saved_libs=$LIBS
LIBS="$LIBS $LUA_LIB"
AC_SEARCH_LIBS([lua_load],
[ lua$LUA_VERSION \
lua$LUA_SHORT_VERSION \
lua-$LUA_VERSION \
lua-$LUA_SHORT_VERSION \
lua \
],
[_ax_found_lua_libs='yes'],
[_ax_found_lua_libs='no'],
[$_ax_lua_extra_libs])
LIBS=$_ax_lua_saved_libs
AS_IF([test "x$ac_cv_search_lua_load" != 'xno' &&
test "x$ac_cv_search_lua_load" != 'xnone required'],
[LUA_LIB="$ac_cv_search_lua_load $_ax_lua_extra_libs"])
])
dnl Test the result and run user code.
AS_IF([test "x$_ax_found_lua_libs" = 'xyes'], [$1],
[m4_default([$2], [AC_MSG_ERROR([cannot find Lua libs])])])
])
dnl =========================================================================
dnl AX_LUA_READLINE([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
dnl =========================================================================
AC_DEFUN([AX_LUA_READLINE],
[
AX_LIB_READLINE
AS_IF([test "x$ac_cv_header_readline_readline_h" != 'x' &&
test "x$ac_cv_header_readline_history_h" != 'x'],
[ LUA_LIBS_CFLAGS="-DLUA_USE_READLINE $LUA_LIBS_CFLAGS"
$1
],
[$2])
])


@ -1,61 +0,0 @@
# ===============================================================================
# https://www.gnu.org/software/autoconf-archive/ax_prog_dotnetcore_version.html
# ===============================================================================
#
# SYNOPSIS
#
# AX_PROG_DOTNETCORE_VERSION([VERSION],[ACTION-IF-TRUE],[ACTION-IF-FALSE])
#
# DESCRIPTION
#
# Makes sure that .NET Core supports the version indicated. If true the
# shell commands in ACTION-IF-TRUE are executed. If not the shell commands
# in ACTION-IF-FALSE are run. The $dotnetcore_version variable will be
# filled with the detected version.
#
# This macro uses the $DOTNETCORE variable to perform the check. If
# $DOTNETCORE is not set prior to calling this macro, the macro will fail.
#
# Example:
#
# AC_PATH_PROG([DOTNETCORE],[dotnet])
# AX_PROG_DOTNETCORE_VERSION([1.0.2],[ ... ],[ ... ])
#
# Searches for .NET Core, then checks if at least version 1.0.2 is
# present.
#
# LICENSE
#
# Copyright (c) 2016 Jens Geyer <jensg@apache.org>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 2
AC_DEFUN([AX_PROG_DOTNETCORE_VERSION],[
AC_REQUIRE([AC_PROG_SED])
AS_IF([test -n "$DOTNETCORE"],[
ax_dotnetcore_version="$1"
AC_MSG_CHECKING([for .NET Core version])
dotnetcore_version=`$DOTNETCORE --version 2>&1 | $SED -e 's/\(@<:@0-9@:>@*\.@<:@0-9@:>@*\.@<:@0-9@:>@*\)\(.*\)/\1/'`
AC_MSG_RESULT($dotnetcore_version)
AC_SUBST([DOTNETCORE_VERSION],[$dotnetcore_version])
AX_COMPARE_VERSION([$ax_dotnetcore_version],[le],[$dotnetcore_version],[
:
$2
],[
:
$3
])
],[
AC_MSG_WARN([could not find .NET Core])
$3
])
])
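dnl A minimal usage sketch (illustrative, not part of the shipped file); the
dnl minimum version and the messages are assumptions for the example.
dnl
dnl   AC_PATH_PROG([DOTNETCORE], [dotnet])
dnl   AX_PROG_DOTNETCORE_VERSION([1.0.2],
dnl     [AC_MSG_NOTICE([.NET Core $DOTNETCORE_VERSION is recent enough])],
dnl     [AC_MSG_WARN([.NET Core 1.0.2 or later is required])])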

@@ -1,60 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_prog_haxe_version.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_PROG_HAXE_VERSION([VERSION],[ACTION-IF-TRUE],[ACTION-IF-FALSE])
#
# DESCRIPTION
#
# Makes sure that haxe supports the version indicated. If true the shell
# commands in ACTION-IF-TRUE are executed. If not the shell commands in
# ACTION-IF-FALSE are run. The $HAXE_VERSION variable will be filled with
# the detected version.
#
# This macro uses the $HAXE variable to perform the check. If $HAXE is not
# set prior to calling this macro, the macro will fail.
#
# Example:
#
# AC_PATH_PROG([HAXE],[haxe])
# AX_PROG_HAXE_VERSION([3.1.3],[ ... ],[ ... ])
#
# Searches for Haxe, then checks if at least version 3.1.3 is present.
#
# LICENSE
#
# Copyright (c) 2015 Jens Geyer <jensg@apache.org>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 2
AC_DEFUN([AX_PROG_HAXE_VERSION],[
AC_REQUIRE([AC_PROG_SED])
AS_IF([test -n "$HAXE"],[
ax_haxe_version="$1"
AC_MSG_CHECKING([for haxe version])
haxe_version=`$HAXE -version 2>&1 | $SED -e 's/^.* \( @<:@0-9@:>@*\.@<:@0-9@:>@*\.@<:@0-9@:>@*\) .*/\1/'`
AC_MSG_RESULT($haxe_version)
AC_SUBST([HAXE_VERSION],[$haxe_version])
AX_COMPARE_VERSION([$ax_haxe_version],[le],[$haxe_version],[
:
$2
],[
:
$3
])
],[
AC_MSG_WARN([could not find Haxe])
$3
])
])
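dnl A minimal usage sketch (illustrative, not part of the shipped file); the
dnl minimum version and the messages are assumptions for the example.
dnl
dnl   AC_PATH_PROG([HAXE], [haxe])
dnl   AX_PROG_HAXE_VERSION([3.1.3],
dnl     [AC_MSG_NOTICE([haxe $HAXE_VERSION is recent enough])],
dnl     [AC_MSG_WARN([haxe 3.1.3 or later is required])])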

@@ -1,77 +0,0 @@
# ===========================================================================
# https://www.gnu.org/software/autoconf-archive/ax_prog_perl_modules.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_PROG_PERL_MODULES([MODULES], [ACTION-IF-TRUE], [ACTION-IF-FALSE])
#
# DESCRIPTION
#
# Checks to see if the given perl modules are available. If true the shell
# commands in ACTION-IF-TRUE are executed. If not the shell commands in
# ACTION-IF-FALSE are run. Note that if $PERL is not already set (for
# example, by a prior AC_CHECK_PROG or AC_PATH_PROG),
# AC_CHECK_PROG(PERL, perl, perl) will be run.
#
# MODULES is a space separated list of module names. To check for a
# minimum version of a module, append the version number to the module
# name, separated by an equals sign.
#
# Example:
#
# AX_PROG_PERL_MODULES( Text::Wrap Net::LDAP=1.0.3, ,
#   AC_MSG_WARN(Need some Perl modules))
#
# LICENSE
#
# Copyright (c) 2009 Dean Povey <povey@wedgetail.com>
#
# Copying and distribution of this file, with or without modification, are
# permitted in any medium without royalty provided the copyright notice
# and this notice are preserved. This file is offered as-is, without any
# warranty.
#serial 8
AU_ALIAS([AC_PROG_PERL_MODULES], [AX_PROG_PERL_MODULES])
AC_DEFUN([AX_PROG_PERL_MODULES],[dnl
m4_define([ax_perl_modules])
m4_foreach([ax_perl_module], m4_split(m4_normalize([$1])),
[
m4_append([ax_perl_modules],
[']m4_bpatsubst(ax_perl_module,=,[ ])[' ])
])
# Make sure we have perl
if test -z "$PERL"; then
AC_CHECK_PROG(PERL,perl,perl)
fi
if test "x$PERL" != x; then
ax_perl_modules_failed=0
for ax_perl_module in ax_perl_modules; do
AC_MSG_CHECKING(for perl module $ax_perl_module)
# Would be nice to log result here, but can't rely on autoconf internals
$PERL -e "use $ax_perl_module; exit" > /dev/null 2>&1
if test $? -ne 0; then
AC_MSG_RESULT(no);
ax_perl_modules_failed=1
else
AC_MSG_RESULT(ok);
fi
done
# Run optional shell commands
if test "$ax_perl_modules_failed" = 0; then
:
$2
else
:
$3
fi
else
AC_MSG_WARN(could not find perl)
fi])dnl
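# A minimal usage sketch (illustrative, not part of the shipped file); the
# module list and messages are assumptions for the example.
#
#   AX_PROG_PERL_MODULES([Text::Wrap Net::LDAP=1.0.3],
#     [AC_MSG_NOTICE([required Perl modules found])],
#     [AC_MSG_ERROR([missing required Perl modules])])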

@@ -1,127 +0,0 @@
dnl @synopsis AX_SIGNED_RIGHT_SHIFT
dnl
dnl Tests the behavior of a right shift on a negative signed int.
dnl
dnl This macro calls:
dnl AC_DEFINE(SIGNED_RIGHT_SHIFT_IS)
dnl AC_DEFINE(ARITHMETIC_RIGHT_SHIFT)
dnl AC_DEFINE(LOGICAL_RIGHT_SHIFT)
dnl AC_DEFINE(UNKNOWN_RIGHT_SHIFT)
dnl
dnl SIGNED_RIGHT_SHIFT_IS will be equal to one of the other macros.
dnl It also leaves the shell variable "ax_signed_right_shift"
dnl set to "arithmetic", "logical", or "unknown".
dnl
dnl NOTE: This macro does not work for cross-compiling.
dnl
dnl @category C
dnl @version 2009-03-25
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
AC_DEFUN([AX_SIGNED_RIGHT_SHIFT],
[
AC_MSG_CHECKING(the behavior of a signed right shift)
success_arithmetic=no
AC_RUN_IFELSE([AC_LANG_PROGRAM([[]], [[
return
/* 0xffffffff */
-1 >> 1 != -1 ||
-1 >> 2 != -1 ||
-1 >> 3 != -1 ||
-1 >> 4 != -1 ||
-1 >> 8 != -1 ||
-1 >> 16 != -1 ||
-1 >> 24 != -1 ||
-1 >> 31 != -1 ||
/* 0x80000000 */
(-2147483647 - 1) >> 1 != -1073741824 ||
(-2147483647 - 1) >> 2 != -536870912 ||
(-2147483647 - 1) >> 3 != -268435456 ||
(-2147483647 - 1) >> 4 != -134217728 ||
(-2147483647 - 1) >> 8 != -8388608 ||
(-2147483647 - 1) >> 16 != -32768 ||
(-2147483647 - 1) >> 24 != -128 ||
(-2147483647 - 1) >> 31 != -1 ||
/* 0x90800000 */
-1870659584 >> 1 != -935329792 ||
-1870659584 >> 2 != -467664896 ||
-1870659584 >> 3 != -233832448 ||
-1870659584 >> 4 != -116916224 ||
-1870659584 >> 8 != -7307264 ||
-1870659584 >> 16 != -28544 ||
-1870659584 >> 24 != -112 ||
-1870659584 >> 31 != -1 ||
0;
]])], [
success_arithmetic=yes
])
success_logical=no
AC_RUN_IFELSE([AC_LANG_PROGRAM([[]], [[
return
/* 0xffffffff */
-1 >> 1 != (signed)((unsigned)-1 >> 1) ||
-1 >> 2 != (signed)((unsigned)-1 >> 2) ||
-1 >> 3 != (signed)((unsigned)-1 >> 3) ||
-1 >> 4 != (signed)((unsigned)-1 >> 4) ||
-1 >> 8 != (signed)((unsigned)-1 >> 8) ||
-1 >> 16 != (signed)((unsigned)-1 >> 16) ||
-1 >> 24 != (signed)((unsigned)-1 >> 24) ||
-1 >> 31 != (signed)((unsigned)-1 >> 31) ||
/* 0x80000000 */
(-2147483647 - 1) >> 1 != (signed)((unsigned)(-2147483647 - 1) >> 1) ||
(-2147483647 - 1) >> 2 != (signed)((unsigned)(-2147483647 - 1) >> 2) ||
(-2147483647 - 1) >> 3 != (signed)((unsigned)(-2147483647 - 1) >> 3) ||
(-2147483647 - 1) >> 4 != (signed)((unsigned)(-2147483647 - 1) >> 4) ||
(-2147483647 - 1) >> 8 != (signed)((unsigned)(-2147483647 - 1) >> 8) ||
(-2147483647 - 1) >> 16 != (signed)((unsigned)(-2147483647 - 1) >> 16) ||
(-2147483647 - 1) >> 24 != (signed)((unsigned)(-2147483647 - 1) >> 24) ||
(-2147483647 - 1) >> 31 != (signed)((unsigned)(-2147483647 - 1) >> 31) ||
/* 0x90800000 */
-1870659584 >> 1 != (signed)((unsigned)-1870659584 >> 1) ||
-1870659584 >> 2 != (signed)((unsigned)-1870659584 >> 2) ||
-1870659584 >> 3 != (signed)((unsigned)-1870659584 >> 3) ||
-1870659584 >> 4 != (signed)((unsigned)-1870659584 >> 4) ||
-1870659584 >> 8 != (signed)((unsigned)-1870659584 >> 8) ||
-1870659584 >> 16 != (signed)((unsigned)-1870659584 >> 16) ||
-1870659584 >> 24 != (signed)((unsigned)-1870659584 >> 24) ||
-1870659584 >> 31 != (signed)((unsigned)-1870659584 >> 31) ||
0;
]])], [
success_logical=yes
])
AC_DEFINE([ARITHMETIC_RIGHT_SHIFT], 1, [Possible value for SIGNED_RIGHT_SHIFT_IS])
AC_DEFINE([LOGICAL_RIGHT_SHIFT], 2, [Possible value for SIGNED_RIGHT_SHIFT_IS])
AC_DEFINE([UNKNOWN_RIGHT_SHIFT], 3, [Possible value for SIGNED_RIGHT_SHIFT_IS])
if test "$success_arithmetic" = "yes" && test "$success_logical" = "yes" ; then
AC_MSG_ERROR("Right shift appears to be both arithmetic and logical!")
elif test "$success_arithmetic" = "yes" ; then
ax_signed_right_shift=arithmetic
AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 1,
[Indicates the effect of the right shift operator
on negative signed integers])
elif test "$success_logical" = "yes" ; then
ax_signed_right_shift=logical
AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 2,
[Indicates the effect of the right shift operator
on negative signed integers])
else
ax_signed_right_shift=unknown
AC_DEFINE([SIGNED_RIGHT_SHIFT_IS], 3,
[Indicates the effect of the right shift operator
on negative signed integers])
fi
AC_MSG_RESULT($ax_signed_right_shift)
])
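dnl A minimal usage sketch (illustrative, not part of the shipped file): after
dnl the check, configure.ac can branch on the detected behavior; failing hard
dnl on "unknown" is an assumption for the example.
dnl
dnl   AX_SIGNED_RIGHT_SHIFT
dnl   AS_IF([test "x$ax_signed_right_shift" = 'xunknown'],
dnl     [AC_MSG_ERROR([cannot determine signed right shift behavior])])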

@@ -1,28 +0,0 @@
dnl @synopsis AX_THRIFT_GEN(SHORT_LANGUAGE, LONG_LANGUAGE, DEFAULT)
dnl @synopsis AX_THRIFT_LIB(SHORT_LANGUAGE, LONG_LANGUAGE, DEFAULT)
dnl
dnl Allow a particular language generator to be disabled.
dnl Allow a particular language library to be disabled.
dnl
dnl These macros have poor error handling and are poorly documented.
dnl They are intended only for internal use by the Thrift compiler.
dnl
dnl @version 2008-02-20
dnl @license AllPermissive
dnl
dnl Copyright (C) 2009 David Reiss
dnl Copying and distribution of this file, with or without modification,
dnl are permitted in any medium without royalty provided the copyright
dnl notice and this notice are preserved.
AC_DEFUN([AX_THRIFT_LIB],
[
AC_ARG_WITH($1,
AC_HELP_STRING([--with-$1], [build the $2 library @<:@default=$3@:>@]),
[with_$1="$withval"],
[with_$1=$3]
)
have_$1=no
dnl What we do here is going to vary from library to library,
dnl so we can't really generalize (yet!).
])
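dnl A minimal usage sketch (illustrative, not part of the shipped file); the
dnl language name and the automake conditional are assumptions, mirroring how
dnl a configure.ac might wire up the switch.
dnl
dnl   AX_THRIFT_LIB(go, [Go], yes)
dnl   if test "$with_go" = "yes"; then
dnl     dnl per-library capability checks would go here
dnl     have_go=yes
dnl   fi
dnl   AM_CONDITIONAL([WITH_GO], [test "$have_go" = "yes"])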

@@ -19,7 +19,7 @@
# build Apache Thrift on AppVeyor - https://ci.appveyor.com
-version: '1.0.0-dev.{build}'
+version: '0.11.0.{build}'
shallow_clone: true

@@ -1,36 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_build.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
SET CMAKEARGS=^
-G'%GENERATOR%' ^
-DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
-DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^
-DCMAKE_MAKE_PROGRAM=/mingw64/bin/mingw32-make ^
-DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc.exe ^
-DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++.exe ^
-DWITH_LIBEVENT=OFF ^
-DWITH_PYTHON=OFF ^
-DWITH_SHARED_LIB=OFF ^
-DWITH_STATIC_LIB=ON
@ECHO ON
%BASH% -lc "mkdir -p %BUILDDIR_MSYS% && cd %BUILDDIR_MSYS% && cmake.exe %SRCDIR_MSYS% %CMAKEARGS% && cmake --build . --config %CONFIGURATION% --target install" || EXIT /B
@ECHO OFF

@@ -1,21 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
::
:: Appveyor install script for MinGW
:: Installs (or builds) third party packages we need
::
:: Same as the MSYS installation requirements
CALL build\appveyor\MSYS-appveyor-install.bat

@@ -1,16 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
:: Same as MSYS2
CALL build\appveyor\MSYS-appveyor-test.bat

@@ -1,47 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_build.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
MKDIR "%BUILDDIR%" || EXIT /B
CD "%BUILDDIR%" || EXIT /B
@ECHO ON
cmake "%SRCDIR%" ^
-G"%GENERATOR%" ^
-DBISON_EXECUTABLE=C:\ProgramData\chocolatey\lib\winflexbison3\tools\win_bison.exe ^
-DBOOST_ROOT="%BOOST_ROOT%" ^
-DBOOST_LIBRARYDIR="%BOOST_LIBRARYDIR%" ^
-DCMAKE_BUILD_TYPE="%CONFIGURATION%" ^
-DCMAKE_INSTALL_PREFIX="%INSTDIR%" ^
-DFLEX_EXECUTABLE=C:\ProgramData\chocolatey\lib\winflexbison3\tools\win_flex.exe ^
-DINTTYPES_ROOT="%WIN3P%\msinttypes" ^
-DLIBEVENT_ROOT="%WIN3P%\libevent-%LIBEVENT_VERSION%-stable" ^
-DOPENSSL_ROOT_DIR="%OPENSSL_ROOT%" ^
-DOPENSSL_USE_STATIC_LIBS=OFF ^
-DZLIB_LIBRARY="%WIN3P%\zlib-inst\lib\zlib%ZLIB_LIB_SUFFIX%.lib" ^
-DZLIB_ROOT="%WIN3P%\zlib-inst" ^
-DWITH_PYTHON=%WITH_PYTHON% ^
-DWITH_%THREADMODEL%THREADS=ON ^
-DWITH_SHARED_LIB=OFF ^
-DWITH_STATIC_LIB=ON || EXIT /B
@ECHO OFF
cmake --build . ^
--config "%CONFIGURATION%" ^
--target INSTALL || EXIT /B

@@ -1,72 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
::
:: Appveyor install script for MSVC
:: Installs (or builds) third party packages we need
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_install.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
CALL cl_showenv.bat || EXIT /B
MKDIR "%WIN3P%" || EXIT /B
:: Install ant - this also installs the latest JDK as a dependency
:: The installation of JDK requires us to pick up PATH and JAVA_HOME from the registry
cinst -c "%BUILDCACHE%" -y ant || EXIT /B
:: Install bison and flex
cinst -c "%BUILDCACHE%" -y winflexbison3 || EXIT /B
:: zlib
CD "%APPVEYOR_SCRIPTS%" || EXIT /B
call build-zlib.bat || EXIT /B
:: libevent
CD "%APPVEYOR_SCRIPTS%" || EXIT /B
call build-libevent.bat || EXIT /B
:: python packages
pip install backports.ssl_match_hostname ^
ipaddress ^
tornado ^
twisted || EXIT /B
:: msinttypes - for MSVC2010 only
SET MSINTTYPESURL=https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/msinttypes/msinttypes-r26.zip
IF "%COMPILER%" == "vc100" (
MKDIR "%WIN3P%\msinttypes" || EXIT /B
CD "%WIN3P%\msinttypes" || EXIT /B
appveyor DownloadFile "%MSINTTYPESURL%" || EXIT /B
7z x "msinttypes-r26.zip" || EXIT /B
)
:: appveyor build slaves do not have MSVC2010 Boost installed
IF "%COMPILER%" == "vc100" (
SET BITS=64
IF "%PLATFORM%" == "x86" (
SET BITS=32
)
SET BOOSTEXEURL=https://downloads.sourceforge.net/project/boost/boost-binaries/%BOOST_VERSION%/boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe
SET BOOSTEXE=C:\projects\thrift\buildcache\boost_%BOOST_VERSION:.=_%-msvc-10.0-!BITS!.exe
appveyor DownloadFile "!BOOSTEXEURL!" -FileName "!BOOSTEXE!" || EXIT /B
"!BOOSTEXE!" /dir=C:\Libraries\boost_%BOOST_VERSION:.=_% /silent || EXIT /B
)
:: Haskell (GHC) and cabal
cinst -c "%BUILDCACHE%" -y ghc || EXIT /B

@@ -1,25 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_test.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
CD "%BUILDDIR%" || EXIT /B
:: Add directories to the path to find DLLs of third party libraries so tests run
SET PATH=%BOOST_LIBRARYDIR%;%OPENSSL_ROOT%\bin;%WIN3P%\zlib-inst\bin;%PATH%
ctest -C %CONFIGURATION% --timeout 300 -VV -E "(%DISABLED_TESTS%)" || EXIT /B

@@ -1,47 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_build.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
SET BASH=C:\msys64\usr\bin\bash
SET CMAKE=/c/msys64/mingw64/bin/cmake.exe
@ECHO ON
SET CMAKEARGS=-G\"%GENERATOR%\" ^
-DBoost_DEBUG=ON ^
-DBoost_NAMESPACE=libboost ^
-DBOOST_INCLUDEDIR=%BOOST_INCLUDEDIR% ^
-DBOOST_LIBRARYDIR=%BOOST_LIBRARYDIR% ^
-DCMAKE_BUILD_TYPE=%CONFIGURATION% ^
-DCMAKE_C_COMPILER=gcc.exe ^
-DCMAKE_CXX_COMPILER=g++.exe ^
-DCMAKE_MAKE_PROGRAM=make.exe ^
-DCMAKE_INSTALL_PREFIX=%INSTDIR_MSYS% ^
-DOPENSSL_LIBRARIES=%OPENSSL_LIBRARIES% ^
-DOPENSSL_ROOT_DIR=%OPENSSL_ROOT% ^
-DOPENSSL_USE_STATIC_LIBS=ON ^
-DWITH_BOOST_STATIC=ON ^
-DWITH_JAVA=OFF ^
-DWITH_LIBEVENT=OFF ^
-DWITH_PYTHON=%WITH_PYTHON% ^
-DWITH_SHARED_LIB=OFF ^
-DWITH_STATIC_LIB=ON
%BASH% -lc "mkdir %BUILDDIR_MSYS% && cd %BUILDDIR_MSYS% && %CMAKE% %SRCDIR_MSYS% %CMAKEARGS% && %CMAKE% --build . --config %CONFIGURATION% --target install" || EXIT /B
@ECHO OFF

@@ -1,41 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
::
:: Appveyor install script for MSYS
:: Installs (or builds) third party packages we need
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_install.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
CALL cl_showenv.bat || EXIT /B
SET PACKAGES=^
--needed -S bison flex ^
make ^
mingw-w64-x86_64-boost ^
mingw-w64-x86_64-cmake ^
mingw-w64-x86_64-openssl ^
mingw-w64-x86_64-toolchain ^
mingw-w64-x86_64-zlib
:: omitting libevent-devel for now; it is version 2.1.4 and doesn't play nice with MinGW
%BASH% -lc "pacman --noconfirm -Syu" || EXIT /B
%BASH% -lc "pacman --noconfirm -Su" || EXIT /B
%BASH% -lc "pacman --noconfirm %PACKAGES%" || EXIT /B

@@ -1,26 +0,0 @@
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@ECHO OFF
SETLOCAL EnableDelayedExpansion
CD build\appveyor || EXIT /B
CALL cl_banner_test.bat || EXIT /B
CALL cl_setenv.bat || EXIT /B
CD "%BUILDDIR%" || EXIT /B
:: randomly fails on mingw; see Jira THRIFT-4106
SET DISABLED_TESTS=concurrency_test
%BASH% -lc "cd %BUILDDIR_MSYS% && ctest.exe -C %CONFIGURATION% --timeout 300 -VV -E '(%DISABLED_TESTS%)'" || EXIT /B

@@ -1,34 +0,0 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Appveyor Build
AppVeyor can build with MSVC 2010 through 2015 and also provides the latest
MSYS2/MinGW 64-bit environment. Many versions of Boost and Python are
installed as well. See what AppVeyor has
[installed on build workers](https://www.appveyor.com/docs/installed-software/).

We run a matrix build on AppVeyor and build the following combinations:

* MinGW x64 (gcc 6.3.0)
* MSVC 2010 x86, an older boost, an older python
* MSVC 2015 x86/x64, the latest boost, the latest python
* MSYS2 x64 (gcc 6.3.0) - this is a work in progress

The AppVeyor script takes the first four letters of the PROFILE specified in
the environment stanza (for example, PROFILE=MSVC2015 selects the MSVC-*
scripts) and runs these scripts in order:

* ????-appveyor-install.bat installs third-party libraries and sets up the environment
* ????-appveyor-build.bat builds with cmake
* ????-appveyor-test.bat runs ctest

Some files were not shown because too many files have changed in this diff.