Dep helper (#2151)

* Add dep task to update go dependencies

* Update go dependencies
Authored by Manuel Alejandro de Brito Fontes on 2018-09-29 19:47:07 -03:00; committed by Miek Gieben
parent 8f8b81f56b
commit 0e8977761d
764 changed files with 172 additions and 267451 deletions


@@ -95,7 +95,7 @@ $ go get -u github.com/golang/dep/cmd/dep
Use the following to update the locked versions of all dependencies
```sh
$ dep ensure -update
$ make dep-ensure
```
To add a dependency to the project, you might run
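The exact command is cut off by this hunk; as a hedged sketch of the contributor workflow after this change (assuming stock dep behaviour and a purely hypothetical import path):
```sh
# update the locked versions of all dependencies via the new Makefile target
$ make dep-ensure

# add a new dependency (assumed stock `dep ensure -add`; example import path only)
$ dep ensure -add github.com/example/library
```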

Gopkg.lock (generated)

@@ -2,27 +2,34 @@
[[projects]]
digest = "1:6aa683f91e93784ec7b76fe1568492f419aa3aa0b72f2087912c8c2cada7a375"
name = "github.com/DataDog/dd-trace-go"
packages = [
"opentracing",
"tracer",
"tracer/ext"
"tracer/ext",
]
pruneopts = ""
revision = "27617015d45e6cd550b9a7ac7715c37cc2f7d020"
version = "v0.6.1"
[[projects]]
digest = "1:9362b2212139b7821f73a86169bf80ce6b0264956f87d82ab3aeedb2b5c08fea"
name = "github.com/Shopify/sarama"
packages = ["."]
pruneopts = ""
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
version = "v1.17.0"
[[projects]]
digest = "1:58a1a132df6267aff83b2bf0ab62f42571695e6e74335107deafbb7ca55db4fc"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
pruneopts = ""
revision = "e59b73d3c2bf1c328ccb78e683c0462fa1a473c7"
[[projects]]
digest = "1:8a047cbc1be2f1578abaa9ba4daf4c87992de1121149632ad9147ee536b53031"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@@ -53,12 +60,14 @@
"private/protocol/xml/xmlutil",
"service/route53",
"service/route53/route53iface",
"service/sts"
"service/sts",
]
pruneopts = ""
revision = "852052a10992d92f68b9a60862a3312292524903"
version = "v1.14.17"
[[projects]]
digest = "1:96637fd1b47c35bd2fc49969138b40ad6b66166e2e36664ac5ffd97c93542e62"
name = "github.com/coreos/etcd"
packages = [
"auth/authpb",
@@ -66,204 +75,262 @@
"etcdserver/api/v3rpc/rpctypes",
"etcdserver/etcdserverpb",
"mvcc/mvccpb",
"pkg/types"
"pkg/types",
]
pruneopts = ""
revision = "33245c6b5b49130ca99280408fadfab01aac0e48"
version = "v3.3.8"
[[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = ""
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:1376ae7eff7cb0f47cf003d1ce49202dad343a7494e97bb6a54aff1ae5eb9397"
name = "github.com/dnstap/golang-dnstap"
packages = ["."]
pruneopts = ""
revision = "2cf77a2b5e11ac8d0ba3892772ac8e1f7b528344"
[[projects]]
digest = "1:6d6672f85a84411509885eaa32f597577873de00e30729b9bb0eb1e1faa49c12"
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
pruneopts = ""
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:1f7503fa58a852a1416556ae2ddb219b49a1304fd408391948e2e3676514c48d"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
pruneopts = ""
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
digest = "1:d8d46d21073d0f65daf1740ebf4629c65e04bf92e14ce93c2201e8624843c3d3"
name = "github.com/eapache/queue"
packages = ["."]
pruneopts = ""
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:3ef2b40796b5a4ac1b0da237c6e1bc31aae513d3b86cdb0506198ba49f33b458"
name = "github.com/farsightsec/golang-framestream"
packages = ["."]
pruneopts = ""
revision = "c06a5734334d9629b3db143d74b47eb94ea68612"
[[projects]]
digest = "1:b13707423743d41665fd23f0c36b2f37bb49c30e94adb813319c44188a51ba22"
name = "github.com/ghodss/yaml"
packages = ["."]
pruneopts = ""
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
digest = "1:617b3e0f5989d4ff866a1820480990c65dfc9257eb080da749a45e2d76681b02"
name = "github.com/go-ini/ini"
packages = ["."]
pruneopts = ""
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = ""
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:0a3f6a0c68ab8f3d455f8892295503b179e571b7fefe47cc6c556405d1f83411"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys"
"sortkeys",
]
pruneopts = ""
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
branch = "master"
digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = ""
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
digest = "1:f958a1c137db276e52f0b50efee41a1a389dcdded59a69711f3e872757dab34b"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
"ptypes/timestamp",
]
pruneopts = ""
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:2a5888946cdbc8aa360fd43301f9fc7869d663f60d5eedae7d4e6e5e4f06f2bf"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = ""
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
branch = "master"
digest = "1:be28c0531a755f2178acf1e327e6f5a8a3968feb5f2567cdc968064253141751"
name = "github.com/google/btree"
packages = ["."]
pruneopts = ""
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
branch = "master"
digest = "1:754f77e9c839b24778a4b64422236d38515301d2baeb63113aa3edc42e6af692"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = ""
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:16b2837c8b3cf045fa2cdc82af0cf78b19582701394484ae76b2c3bc3c99ad73"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions"
"extensions",
]
pruneopts = ""
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:009a1928b8c096338b68b5822d838a72b4d8520715c1463614476359f3282ec8"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache"
"diskcache",
]
pruneopts = ""
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
[[projects]]
branch = "master"
digest = "1:0c93b4970c254d3f8f76c3e8eb72571d6f24a15415f9c418ecb0801ba5765a90"
name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"]
pruneopts = ""
revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"
[[projects]]
branch = "master"
digest = "1:9c776d7d9c54b7ed89f119e449983c3f24c0023e75001d6092442412ebca6b94"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
"simplelru",
]
pruneopts = ""
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
digest = "1:302c6eb8e669c997bec516a138b8fc496018faa1ece4c13e445a2749fbe079bb"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = ""
revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
version = "v0.3.5"
[[projects]]
digest = "1:6f49eae0c1e5dab1dafafee34b207aeb7a42303105960944828c2079b92fc88e"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
pruneopts = ""
revision = "0b12d6b5"
[[projects]]
digest = "1:53ac4e911e12dde0ab68655e2006449d207a5a681f084974da2b06e5dbeaca72"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = ""
revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
version = "1.1.4"
[[projects]]
branch = "master"
digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = ""
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:63722a4b1e1717be7b98fc686e0b30d5e7f734b9e93d7dee86293b6deab7ea28"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = ""
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:0c0ff2a89c1bb0d01887e1dac043ad7efbf3ec77482ef058ac423d13497e16fd"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = ""
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = ""
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
branch = "master"
digest = "1:2da0e5077ed40453dc281b9a2428d84cf6ad14063aed189f6296ca5dd25cf13d"
name = "github.com/opentracing-contrib/go-observer"
packages = ["."]
pruneopts = ""
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
[[projects]]
digest = "1:78fb99d6011c2ae6c72f3293a83951311147b12b06a5ffa43abf750c4fab6ac5"
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log"
"log",
]
pruneopts = ""
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
digest = "1:fea0e67285d900e5a0a7ec19ff4b4c82865a28dddbee8454c5360ad908f7069c"
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [
".",
@@ -271,73 +338,93 @@
"thrift/gen-go/scribe",
"thrift/gen-go/zipkincore",
"types",
"wire"
"wire",
]
pruneopts = ""
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
version = "v0.3.4"
[[projects]]
branch = "master"
digest = "1:c24598ffeadd2762552269271b3b1510df2d83ee6696c1e543a0ff653af494bc"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = ""
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:b46305723171710475f2dd37547edd57b67b9de9f2a6267cafdd98331fd6897f"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = ""
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:29e34e58f26655c4d73135cdfc0517ea2ff1483eff34e5d5ef4b6fddbb81e31b"
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32"
"internal/xxh32",
]
pruneopts = ""
revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00"
version = "v2.0.3"
[[projects]]
branch = "master"
digest = "1:60aca47f4eeeb972f1b9da7e7db51dee15ff6c59f7b401c1588b8e6771ba15ef"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = ""
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
digest = "1:bfbc121ef802d245ef67421cff206615357d9202337a3d492b8f668906b485a8"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
"model",
]
pruneopts = ""
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
digest = "1:15bcdc717654ef21128e8af3a63eec39a6d08a830e297f93d65163f87c8eb523"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = ""
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:8e243c568f36b09031ec18dff5f7d2769dcf5ca4d624ea511c8e3197dc3d352d"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = ""
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:bfc538280d91f15c117ed9cd9c1c24ced21999c2945c6982d338a6fd207592ce"
name = "github.com/ugorji/go"
packages = ["codec"]
pruneopts = ""
revision = "f3cacc17c85ecb7f1b6a9e373ee85d1480919868"
[[projects]]
branch = "master"
digest = "1:6ef14be530be39b6b9d75d54ce1d546ae9231e652d9e3eef198cbb19ce8ed3e7"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = ""
revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
[[projects]]
branch = "master"
digest = "1:cef4c63f5c97c27ab65498fd8de6661df17b31e40cd3b4f6f1e217e97f0e23f3"
name = "golang.org/x/net"
packages = [
"context",
@@ -346,20 +433,24 @@
"http2/hpack",
"idna",
"internal/timeseries",
"trace"
"trace",
]
pruneopts = ""
revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9"
[[projects]]
branch = "master"
digest = "1:274e6fab68b7f298bf3f70bd60d4ba0c55284d1d2034175fb3324924268ccd9e"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
"windows",
]
pruneopts = ""
revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56"
[[projects]]
digest = "1:5acd3512b047305d49e8763eef7ba423901e85d5dd2fd1e71778a0ea8de10bd4"
name = "golang.org/x/text"
packages = [
"collate",
@@ -375,24 +466,30 @@
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
"unicode/rangetable",
]
pruneopts = ""
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:55a681cb66f28755765fa5fa5104cbd8dc85c55c02d206f9f89566451e3fe1aa"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = ""
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
digest = "1:a5959f4640612317b0d3122569b7c02565ba6277aa0374cff2ed610c81ef8d74"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = ""
revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"
[[projects]]
digest = "1:5f31b45ee9da7a87f140bef3ed0a7ca34ea2a6d38eb888123b8e28170e8aa4f2"
name = "google.golang.org/grpc"
packages = [
".",
@@ -420,24 +517,30 @@
"stats",
"status",
"tap",
"transport"
"transport",
]
pruneopts = ""
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
[[projects]]
digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = ""
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:f0620375dd1f6251d9973b5f2596228cc8042e887cd7f827e4220bc1ce8c30e2"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = ""
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
digest = "1:f420c8548c93242d8e5dcfa5b34e0243883b4e660f65076e869daafac877144d"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
@@ -468,11 +571,13 @@
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
"storage/v1beta1",
]
pruneopts = ""
revision = "072894a440bdee3a891dea811fe42902311cd2a3"
[[projects]]
digest = "1:b6b2fb7b4da1ac973b64534ace2299a02504f16bc7820cb48edb8ca4077183e1"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
@@ -515,11 +620,13 @@
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect"
"third_party/forked/golang/reflect",
]
pruneopts = ""
revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
[[projects]]
digest = "1:d04779a8de7d5465e0463bd986506348de5e89677c74777f695d3145a7a8d15e"
name = "k8s.io/client-go"
packages = [
"discovery",
@@ -609,20 +716,57 @@
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/retry"
"util/retry",
]
pruneopts = ""
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "v8.0.0"
[[projects]]
branch = "master"
digest = "1:7b06ff480fd71dead51f0f243b573c448c372ec086b790ec7ed4f8a78f2c1cbf"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = ""
revision = "9dfdf9be683f61f82cda12362c44c784e0778b56"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ca446af2026e042acf8d1bb2014aee378cdd577c43d96a75b52e51b4ec4e85ad"
input-imports = [
"github.com/DataDog/dd-trace-go/opentracing",
"github.com/aws/aws-sdk-go/aws",
"github.com/aws/aws-sdk-go/aws/credentials",
"github.com/aws/aws-sdk-go/aws/request",
"github.com/aws/aws-sdk-go/aws/session",
"github.com/aws/aws-sdk-go/service/route53",
"github.com/aws/aws-sdk-go/service/route53/route53iface",
"github.com/coreos/etcd/clientv3",
"github.com/coreos/etcd/mvcc/mvccpb",
"github.com/dnstap/golang-dnstap",
"github.com/farsightsec/golang-framestream",
"github.com/golang/protobuf/proto",
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc",
"github.com/matttproud/golang_protobuf_extensions/pbutil",
"github.com/opentracing/opentracing-go",
"github.com/openzipkin/zipkin-go-opentracing",
"github.com/prometheus/client_model/go",
"github.com/prometheus/common/expfmt",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
"google.golang.org/grpc/grpclog",
"google.golang.org/grpc/peer",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/watch",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/rest",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/client-go/tools/clientcmd/api",
]
solver-name = "gps-cdcl"
solver-version = 1


@@ -96,3 +96,10 @@ presubmit:
clean:
	go clean
	rm -f coredns

.PHONY: dep-ensure
dep-ensure:
	dep version || go get -u github.com/golang/dep/cmd/dep
	dep ensure -v
	dep prune -v
	find vendor -name '*_test.go' -delete
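A brief usage sketch of the new target; the git review step is an assumption about workflow, not something this commit prescribes:
```sh
# refresh vendor/ and Gopkg.lock: installs dep if missing, vendors, prunes,
# and strips vendored *_test.go files (per the recipe above)
$ make dep-ensure

# assumed follow-up: review what changed before committing
$ git status Gopkg.lock vendor/
```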


@@ -1,58 +0,0 @@
package opentracing
import (
"testing"
ot "github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
)
func TestConfigurationDefaults(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
assert.Equal(true, config.Enabled)
assert.Equal(false, config.Debug)
assert.Equal(float64(1), config.SampleRate)
assert.Equal("opentracing.test", config.ServiceName)
assert.Equal("localhost", config.AgentHostname)
assert.Equal("8126", config.AgentPort)
}
func TestConfiguration(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
config.SampleRate = 0
config.ServiceName = "api-intake"
config.AgentHostname = "ddagent.consul.local"
config.AgentPort = "58126"
tracer, closer, err := NewTracer(config)
assert.NotNil(tracer)
assert.NotNil(closer)
assert.Nil(err)
assert.Equal("api-intake", tracer.(*Tracer).config.ServiceName)
}
func TestTracerServiceName(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
config.ServiceName = ""
tracer, closer, err := NewTracer(config)
assert.Nil(tracer)
assert.Nil(closer)
assert.NotNil(err)
assert.Equal("A Datadog Tracer requires a valid `ServiceName` set", err.Error())
}
func TestDisabledTracer(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
config.Enabled = false
tracer, closer, err := NewTracer(config)
assert.IsType(&ot.NoopTracer{}, tracer)
assert.IsType(&noopCloser{}, closer)
assert.Nil(err)
}


@@ -1,29 +0,0 @@
package opentracing
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSpanContextBaggage(t *testing.T) {
assert := assert.New(t)
ctx := SpanContext{}
ctx = ctx.WithBaggageItem("key", "value")
assert.Equal("value", ctx.baggage["key"])
}
func TestSpanContextIterator(t *testing.T) {
assert := assert.New(t)
baggageIterator := make(map[string]string)
ctx := SpanContext{baggage: map[string]string{"key": "value"}}
ctx.ForeachBaggageItem(func(k, v string) bool {
baggageIterator[k] = v
return true
})
assert.Len(baggageIterator, 1)
assert.Equal("value", baggageIterator["key"])
}


@@ -1,30 +0,0 @@
package opentracing_test
import (
"context"
opentracing "github.com/opentracing/opentracing-go"
)
// You can leverage the Golang `Context` for intra-process propagation of
// Spans. In this example we create a root Span, so that it can be reused
// in a nested function to create a child Span.
func Example_startContext() {
// create a new root Span and return a new Context that includes
// the Span itself
ctx := context.Background()
rootSpan, ctx := opentracing.StartSpanFromContext(ctx, "web.request")
defer rootSpan.Finish()
requestHandler(ctx)
}
func requestHandler(ctx context.Context) {
// retrieve the previously set root Span
span := opentracing.SpanFromContext(ctx)
span.SetTag("resource.name", "/")
// or simply create a new child Span from the previous Context
childSpan, _ := opentracing.StartSpanFromContext(ctx, "sql.query")
defer childSpan.Finish()
}


@@ -1,30 +0,0 @@
package opentracing_test
import (
// ddtrace namespace is suggested
ddtrace "github.com/DataDog/dd-trace-go/opentracing"
opentracing "github.com/opentracing/opentracing-go"
)
func Example_initialization() {
// create a Tracer configuration
config := ddtrace.NewConfiguration()
config.ServiceName = "api-intake"
config.AgentHostname = "ddagent.consul.local"
// initialize a Tracer and ensure a graceful shutdown
// using the `closer.Close()`
tracer, closer, err := ddtrace.NewTracer(config)
if err != nil {
// handle the configuration error
}
defer closer.Close()
// set the Datadog tracer as a GlobalTracer
opentracing.SetGlobalTracer(tracer)
startWebServer()
}
func startWebServer() {
// start a web server
}


@@ -1,24 +0,0 @@
package opentracing_test
import (
opentracing "github.com/opentracing/opentracing-go"
)
// You can use the GlobalTracer to create a root Span. If you need to create a hierarchy,
// simply use the `ChildOf` reference
func Example_startSpan() {
// use the GlobalTracer previously set
rootSpan := opentracing.StartSpan("web.request")
defer rootSpan.Finish()
// set the reference to create a hierarchy of spans
reference := opentracing.ChildOf(rootSpan.Context())
childSpan := opentracing.StartSpan("sql.query", reference)
defer childSpan.Finish()
dbQuery()
}
func dbQuery() {
// start a database query
}


@@ -1,81 +0,0 @@
package opentracing
import (
"net/http"
"strconv"
"testing"
opentracing "github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
)
func TestTracerPropagationDefaults(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
root := tracer.StartSpan("web.request")
ctx := root.Context()
headers := http.Header{}
// inject the SpanContext
carrier := opentracing.HTTPHeadersCarrier(headers)
err := tracer.Inject(ctx, opentracing.HTTPHeaders, carrier)
assert.Nil(err)
// retrieve the SpanContext
propagated, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
assert.Nil(err)
tCtx, ok := ctx.(SpanContext)
assert.True(ok)
tPropagated, ok := propagated.(SpanContext)
assert.True(ok)
// compare if there is a Context match
assert.Equal(tCtx.traceID, tPropagated.traceID)
assert.Equal(tCtx.spanID, tPropagated.spanID)
// ensure a child can be created
child := tracer.StartSpan("db.query", opentracing.ChildOf(propagated))
tRoot, ok := root.(*Span)
assert.True(ok)
tChild, ok := child.(*Span)
assert.True(ok)
assert.NotEqual(uint64(0), tChild.Span.TraceID)
assert.NotEqual(uint64(0), tChild.Span.SpanID)
assert.Equal(tRoot.Span.SpanID, tChild.Span.ParentID)
assert.Equal(tRoot.Span.TraceID, tChild.Span.ParentID)
tid := strconv.FormatUint(tRoot.Span.TraceID, 10)
pid := strconv.FormatUint(tRoot.Span.SpanID, 10)
// hardcode header names to fail test if defaults are changed
assert.Equal(headers.Get("x-datadog-trace-id"), tid)
assert.Equal(headers.Get("x-datadog-parent-id"), pid)
}
func TestTracerTextMapPropagationHeader(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
config.TextMapPropagator = NewTextMapPropagator("bg-", "tid", "pid")
tracer, _, _ := NewTracer(config)
root := tracer.StartSpan("web.request").SetBaggageItem("item", "x").(*Span)
ctx := root.Context()
headers := http.Header{}
carrier := opentracing.HTTPHeadersCarrier(headers)
err := tracer.Inject(ctx, opentracing.HTTPHeaders, carrier)
assert.Nil(err)
tid := strconv.FormatUint(root.Span.TraceID, 10)
pid := strconv.FormatUint(root.Span.SpanID, 10)
assert.Equal(headers.Get("tid"), tid)
assert.Equal(headers.Get("pid"), pid)
assert.Equal(headers.Get("bg-item"), "x")
}


@@ -1,129 +0,0 @@
package opentracing
import (
"errors"
"testing"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
)
func TestSpanBaggage(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
span.SetBaggageItem("key", "value")
assert.Equal("value", span.BaggageItem("key"))
}
func TestSpanContext(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
assert.NotNil(span.Context())
}
func TestSpanOperationName(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
span.SetOperationName("http.request")
assert.Equal("http.request", span.Span.Name)
}
func TestSpanFinish(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
span.Finish()
assert.True(span.Span.Duration > 0)
}
func TestSpanFinishWithTime(t *testing.T) {
assert := assert.New(t)
finishTime := time.Now().Add(10 * time.Second)
span := NewSpan("web.request")
span.FinishWithOptions(opentracing.FinishOptions{FinishTime: finishTime})
duration := finishTime.UnixNano() - span.Span.Start
assert.Equal(duration, span.Span.Duration)
}
func TestSpanSetTag(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
span.SetTag("component", "tracer")
assert.Equal("tracer", span.Meta["component"])
span.SetTag("tagInt", 1234)
assert.Equal("1234", span.Meta["tagInt"])
}
func TestSpanSetDatadogTags(t *testing.T) {
assert := assert.New(t)
span := NewSpan("web.request")
span.SetTag("span.type", "http")
span.SetTag("service.name", "db-cluster")
span.SetTag("resource.name", "SELECT * FROM users;")
assert.Equal("http", span.Span.Type)
assert.Equal("db-cluster", span.Span.Service)
assert.Equal("SELECT * FROM users;", span.Span.Resource)
}
func TestSpanSetErrorTag(t *testing.T) {
assert := assert.New(t)
for _, tt := range []struct {
name string // span name
val interface{} // tag value
msg string // error message
typ string // error type
}{
{
name: "error.error",
val: errors.New("some error"),
msg: "some error",
typ: "*errors.errorString",
},
{
name: "error.string",
val: "some string error",
msg: "some string error",
typ: "*errors.errorString",
},
{
name: "error.struct",
val: struct{ N int }{5},
msg: "{5}",
typ: "*errors.errorString",
},
{
name: "error.other",
val: 1,
msg: "1",
typ: "*errors.errorString",
},
{
name: "error.nil",
val: nil,
msg: "",
typ: "",
},
} {
span := NewSpan(tt.name)
span.SetTag(Error, tt.val)
assert.Equal(span.Meta["error.msg"], tt.msg)
assert.Equal(span.Meta["error.type"], tt.typ)
if tt.val != nil {
assert.NotEqual(span.Meta["error.stack"], "")
}
}
}


@@ -1,131 +0,0 @@
package opentracing
import (
"testing"
"time"
ddtrace "github.com/DataDog/dd-trace-go/tracer"
opentracing "github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
)
func TestDefaultTracer(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
tTracer, ok := tracer.(*Tracer)
assert.True(ok)
assert.Equal(tTracer.impl, ddtrace.DefaultTracer)
}
func TestTracerStartSpan(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
span, ok := tracer.StartSpan("web.request").(*Span)
assert.True(ok)
assert.NotEqual(uint64(0), span.Span.TraceID)
assert.NotEqual(uint64(0), span.Span.SpanID)
assert.Equal(uint64(0), span.Span.ParentID)
assert.Equal("web.request", span.Span.Name)
assert.Equal("opentracing.test", span.Span.Service)
assert.NotNil(span.Span.Tracer())
}
func TestTracerStartChildSpan(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
root := tracer.StartSpan("web.request")
child := tracer.StartSpan("db.query", opentracing.ChildOf(root.Context()))
tRoot, ok := root.(*Span)
assert.True(ok)
tChild, ok := child.(*Span)
assert.True(ok)
assert.NotEqual(uint64(0), tChild.Span.TraceID)
assert.NotEqual(uint64(0), tChild.Span.SpanID)
assert.Equal(tRoot.Span.SpanID, tChild.Span.ParentID)
assert.Equal(tRoot.Span.TraceID, tChild.Span.ParentID)
}
func TestTracerBaggagePropagation(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
root := tracer.StartSpan("web.request")
root.SetBaggageItem("key", "value")
child := tracer.StartSpan("db.query", opentracing.ChildOf(root.Context()))
context, ok := child.Context().(SpanContext)
assert.True(ok)
assert.Equal("value", context.baggage["key"])
}
func TestTracerBaggageImmutability(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
root := tracer.StartSpan("web.request")
root.SetBaggageItem("key", "value")
child := tracer.StartSpan("db.query", opentracing.ChildOf(root.Context()))
child.SetBaggageItem("key", "changed!")
parentContext, ok := root.Context().(SpanContext)
assert.True(ok)
childContext, ok := child.Context().(SpanContext)
assert.True(ok)
assert.Equal("value", parentContext.baggage["key"])
assert.Equal("changed!", childContext.baggage["key"])
}
func TestTracerSpanTags(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
tag := opentracing.Tag{Key: "key", Value: "value"}
span, ok := tracer.StartSpan("web.request", tag).(*Span)
assert.True(ok)
assert.Equal("value", span.Span.Meta["key"])
}
func TestTracerSpanGlobalTags(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
config.GlobalTags["key"] = "value"
tracer, _, _ := NewTracer(config)
span := tracer.StartSpan("web.request").(*Span)
assert.Equal("value", span.Span.Meta["key"])
child := tracer.StartSpan("db.query", opentracing.ChildOf(span.Context())).(*Span)
assert.Equal("value", child.Span.Meta["key"])
}
func TestTracerSpanStartTime(t *testing.T) {
assert := assert.New(t)
config := NewConfiguration()
tracer, _, _ := NewTracer(config)
startTime := time.Now().Add(-10 * time.Second)
span, ok := tracer.StartSpan("web.request", opentracing.StartTime(startTime)).(*Span)
assert.True(ok)
assert.Equal(startTime.UnixNano(), span.Span.Start)
}


@@ -1,105 +0,0 @@
package tracer
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const (
testInitSize = 2
testMaxSize = 5
)
func TestSpanBufferPushOne(t *testing.T) {
assert := assert.New(t)
buffer := newSpanBuffer(newTracerChans(), testInitSize, testMaxSize)
assert.NotNil(buffer)
assert.Len(buffer.spans, 0)
traceID := NextSpanID()
root := NewSpan("name1", "a-service", "a-resource", traceID, traceID, 0, nil)
root.buffer = buffer
buffer.Push(root)
assert.Len(buffer.spans, 1, "there is one span in the buffer")
assert.Equal(root, buffer.spans[0], "the span is the one pushed before")
root.Finish()
select {
case trace := <-buffer.channels.trace:
assert.Len(trace, 1, "there was a trace in the channel")
assert.Equal(root, trace[0], "the trace in the channel is the one pushed before")
assert.Equal(0, buffer.Len(), "no more spans in the buffer")
case err := <-buffer.channels.err:
assert.Fail("unexpected error:", err.Error())
t.Logf("buffer: %v", buffer)
}
}
func TestSpanBufferPushNoFinish(t *testing.T) {
assert := assert.New(t)
buffer := newSpanBuffer(newTracerChans(), testInitSize, testMaxSize)
assert.NotNil(buffer)
assert.Len(buffer.spans, 0)
traceID := NextSpanID()
root := NewSpan("name1", "a-service", "a-resource", traceID, traceID, 0, nil)
root.buffer = buffer
buffer.Push(root)
assert.Len(buffer.spans, 1, "there is one span in the buffer")
assert.Equal(root, buffer.spans[0], "the span is the one pushed before")
select {
case <-buffer.channels.trace:
assert.Fail("span was not finished, should not be flushed")
t.Logf("buffer: %v", buffer)
case err := <-buffer.channels.err:
assert.Fail("unexpected error:", err.Error())
t.Logf("buffer: %v", buffer)
case <-time.After(time.Second / 10):
t.Logf("expected timeout, nothing should show up in buffer as the trace is not finished")
}
}
func TestSpanBufferPushSeveral(t *testing.T) {
assert := assert.New(t)
buffer := newSpanBuffer(newTracerChans(), testInitSize, testMaxSize)
assert.NotNil(buffer)
assert.Len(buffer.spans, 0)
traceID := NextSpanID()
root := NewSpan("name1", "a-service", "a-resource", traceID, traceID, 0, nil)
span2 := NewSpan("name2", "a-service", "a-resource", NextSpanID(), traceID, root.SpanID, nil)
span3 := NewSpan("name3", "a-service", "a-resource", NextSpanID(), traceID, root.SpanID, nil)
span3a := NewSpan("name3", "a-service", "a-resource", NextSpanID(), traceID, span3.SpanID, nil)
spans := []*Span{root, span2, span3, span3a}
for i, span := range spans {
span.buffer = buffer
buffer.Push(span)
assert.Len(buffer.spans, i+1, "there is one more span in the buffer")
assert.Equal(span, buffer.spans[i], "the span is the one pushed before")
}
for _, span := range spans {
span.Finish()
}
select {
case trace := <-buffer.channels.trace:
assert.Len(trace, 4, "there was one trace with the right number of spans in the channel")
for _, span := range spans {
assert.Contains(trace, span, "the trace contains the spans")
}
case err := <-buffer.channels.err:
assert.Fail("unexpected error:", err.Error())
}
}


@@ -1,117 +0,0 @@
package tracer
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestPushTrace(t *testing.T) {
assert := assert.New(t)
channels := newTracerChans()
trace := []*Span{
&Span{
Name: "pylons.request",
Service: "pylons",
Resource: "/",
},
&Span{
Name: "pylons.request",
Service: "pylons",
Resource: "/foo",
},
}
channels.pushTrace(trace)
assert.Len(channels.trace, 1, "there should be data in channel")
assert.Len(channels.traceFlush, 0, "no flush requested yet")
pushed := <-channels.trace
assert.Equal(trace, pushed)
many := traceChanLen/2 + 1
for i := 0; i < many; i++ {
channels.pushTrace(make([]*Span, i))
}
assert.Len(channels.trace, many, "all traces should be in the channel, not yet blocking")
assert.Len(channels.traceFlush, 1, "a trace flush should have been requested")
for i := 0; i < cap(channels.trace); i++ {
channels.pushTrace(make([]*Span, i))
}
assert.Len(channels.trace, traceChanLen, "buffer should be full")
assert.NotEqual(0, len(channels.err), "there should be an error logged")
err := <-channels.err
assert.Equal(&errorTraceChanFull{Len: traceChanLen}, err)
}
func TestPushService(t *testing.T) {
assert := assert.New(t)
channels := newTracerChans()
service := Service{
Name: "redis-master",
App: "redis",
AppType: "db",
}
channels.pushService(service)
assert.Len(channels.service, 1, "there should be data in channel")
assert.Len(channels.serviceFlush, 0, "no flush requested yet")
pushed := <-channels.service
assert.Equal(service, pushed)
many := serviceChanLen/2 + 1
for i := 0; i < many; i++ {
channels.pushService(Service{
Name: fmt.Sprintf("service%d", i),
App: "custom",
AppType: "web",
})
}
assert.Len(channels.service, many, "all services should be in the channel, not yet blocking")
assert.Len(channels.serviceFlush, 1, "a service flush should have been requested")
for i := 0; i < cap(channels.service); i++ {
channels.pushService(Service{
Name: fmt.Sprintf("service%d", i),
App: "custom",
AppType: "web",
})
}
assert.Len(channels.service, serviceChanLen, "buffer should be full")
assert.NotEqual(0, len(channels.err), "there should be an error logged")
err := <-channels.err
assert.Equal(&errorServiceChanFull{Len: serviceChanLen}, err)
}
func TestPushErr(t *testing.T) {
assert := assert.New(t)
channels := newTracerChans()
err := fmt.Errorf("ooops")
channels.pushErr(err)
assert.Len(channels.err, 1, "there should be data in channel")
assert.Len(channels.errFlush, 0, "no flush requested yet")
pushed := <-channels.err
assert.Equal(err, pushed)
many := errChanLen/2 + 1
for i := 0; i < many; i++ {
channels.pushErr(fmt.Errorf("err %d", i))
}
assert.Len(channels.err, many, "all errs should be in the channel, not yet blocking")
assert.Len(channels.errFlush, 1, "a err flush should have been requested")
for i := 0; i < cap(channels.err); i++ {
channels.pushErr(fmt.Errorf("err %d", i))
}
// if we reach this point, it means pushErr is not blocking, which is what we want to double-check
}


@@ -1,69 +0,0 @@
package tracer
import (
"testing"
"context"
"github.com/stretchr/testify/assert"
)
func TestContextWithSpanDefault(t *testing.T) {
assert := assert.New(t)
// create a new context with a span
span := SpanFromContextDefault(nil)
assert.NotNil(span)
ctx := context.Background()
assert.NotNil(SpanFromContextDefault(ctx))
}
func TestSpanFromContext(t *testing.T) {
assert := assert.New(t)
// create a new context with a span
ctx := context.Background()
tracer := NewTracer()
expectedSpan := tracer.NewRootSpan("pylons.request", "pylons", "/")
ctx = ContextWithSpan(ctx, expectedSpan)
span, ok := SpanFromContext(ctx)
assert.True(ok)
assert.Equal(expectedSpan, span)
}
func TestSpanFromContextNil(t *testing.T) {
assert := assert.New(t)
// create a context without a span
ctx := context.Background()
span, ok := SpanFromContext(ctx)
assert.False(ok)
assert.Nil(span)
span, ok = SpanFromContext(nil)
assert.False(ok)
assert.Nil(span)
}
func TestSpanMissingParent(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
// assuming we're in an inner function and we
// forget the nil or ok checks
ctx := context.Background()
span, _ := SpanFromContext(ctx)
// span is nil according to the API
child := tracer.NewChildSpan("redis.command", span)
child.Finish()
// the child is finished but it's not recorded in
// the tracer buffer because the service is missing
assert.True(child.Duration > 0)
assert.Equal(1, len(tracer.channels.trace))
}


@@ -1,114 +0,0 @@
package tracer
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/ugorji/go/codec"
)
func TestEncoderContentType(t *testing.T) {
assert := assert.New(t)
testCases := []struct {
encoder Encoder
contentType string
}{
{newJSONEncoder(), "application/json"},
{newMsgpackEncoder(), "application/msgpack"},
}
for _, tc := range testCases {
assert.Equal(tc.contentType, tc.encoder.ContentType())
}
}
func TestJSONEncoding(t *testing.T) {
assert := assert.New(t)
testCases := []struct {
traces int
size int
}{
{1, 1},
{3, 1},
{1, 3},
{3, 3},
}
for _, tc := range testCases {
payload := getTestTrace(tc.traces, tc.size)
encoder := newJSONEncoder()
err := encoder.EncodeTraces(payload)
assert.Nil(err)
// decode to check the right encoding
var traces [][]*Span
dec := json.NewDecoder(encoder.buffer)
err = dec.Decode(&traces)
assert.Nil(err)
assert.Len(traces, tc.traces)
for _, trace := range traces {
assert.Len(trace, tc.size)
span := trace[0]
assert.Equal(uint64(42), span.TraceID)
assert.Equal(uint64(52), span.SpanID)
assert.Equal(uint64(42), span.ParentID)
assert.Equal("web", span.Type)
assert.Equal("high.throughput", span.Service)
assert.Equal("sending.events", span.Name)
assert.Equal("SEND /data", span.Resource)
assert.Equal(int64(1481215590883401105), span.Start)
assert.Equal(int64(1000000000), span.Duration)
assert.Equal("192.168.0.1", span.Meta["http.host"])
assert.Equal(float64(41.99), span.Metrics["http.monitor"])
}
}
}
func TestMsgpackEncoding(t *testing.T) {
assert := assert.New(t)
testCases := []struct {
traces int
size int
}{
{1, 1},
{3, 1},
{1, 3},
{3, 3},
}
for _, tc := range testCases {
payload := getTestTrace(tc.traces, tc.size)
encoder := newMsgpackEncoder()
err := encoder.EncodeTraces(payload)
assert.Nil(err)
// decode to check the right encoding
var traces [][]*Span
var mh codec.MsgpackHandle
dec := codec.NewDecoder(encoder.buffer, &mh)
err = dec.Decode(&traces)
assert.Nil(err)
assert.Len(traces, tc.traces)
for _, trace := range traces {
assert.Len(trace, tc.size)
span := trace[0]
assert.Equal(uint64(42), span.TraceID)
assert.Equal(uint64(52), span.SpanID)
assert.Equal(uint64(42), span.ParentID)
assert.Equal("web", span.Type)
assert.Equal("high.throughput", span.Service)
assert.Equal("sending.events", span.Name)
assert.Equal("SEND /data", span.Resource)
assert.Equal(int64(1481215590883401105), span.Start)
assert.Equal(int64(1000000000), span.Duration)
assert.Equal("192.168.0.1", span.Meta["http.host"])
assert.Equal(float64(41.99), span.Metrics["http.monitor"])
}
}
}


@@ -1,98 +0,0 @@
package tracer
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestErrorSpanBufFull(t *testing.T) {
assert := assert.New(t)
err := &errorSpanBufFull{Len: 42}
assert.Equal("span buffer is full (length: 42)", err.Error())
assert.Equal("ErrorSpanBufFull", errorKey(err))
}
func TestErrorTraceChanFull(t *testing.T) {
assert := assert.New(t)
err := &errorTraceChanFull{Len: 42}
assert.Equal("trace channel is full (length: 42)", err.Error())
assert.Equal("ErrorTraceChanFull", errorKey(err))
}
func TestErrorServiceChanFull(t *testing.T) {
assert := assert.New(t)
err := &errorServiceChanFull{Len: 42}
assert.Equal("service channel is full (length: 42)", err.Error())
assert.Equal("ErrorServiceChanFull", errorKey(err))
}
func TestErrorTraceIDMismatch(t *testing.T) {
assert := assert.New(t)
err := &errorTraceIDMismatch{Expected: 42, Actual: 65535}
assert.Equal("trace ID mismatch (expected: 2a actual: ffff)", err.Error())
assert.Equal("ErrorTraceIDMismatch", errorKey(err))
}
func TestErrorNoSpanBuf(t *testing.T) {
assert := assert.New(t)
err := &errorNoSpanBuf{SpanName: "do"}
assert.Equal("no span buffer (span name: 'do')", err.Error())
}
func TestErrorFlushLostTraces(t *testing.T) {
assert := assert.New(t)
err := &errorFlushLostTraces{Nb: 100}
assert.Equal("unable to flush traces, lost 100 traces", err.Error())
}
func TestErrorFlushLostServices(t *testing.T) {
assert := assert.New(t)
err := &errorFlushLostServices{Nb: 100}
assert.Equal("unable to flush services, lost 100 services", err.Error())
}
func TestErrorKey(t *testing.T) {
assert := assert.New(t)
assert.Equal("this is something unexpected", errorKey(fmt.Errorf("this is something unexpected")))
assert.Equal("", errorKey(nil))
}
func TestAggregateErrors(t *testing.T) {
assert := assert.New(t)
errChan := make(chan error, 100)
errChan <- &errorSpanBufFull{Len: 1000}
errChan <- &errorSpanBufFull{Len: 1000}
errChan <- &errorSpanBufFull{Len: 1000}
errChan <- &errorSpanBufFull{Len: 1000}
errChan <- &errorFlushLostTraces{Nb: 42}
errChan <- &errorTraceIDMismatch{Expected: 42, Actual: 1}
errChan <- &errorTraceIDMismatch{Expected: 42, Actual: 4095}
errs := aggregateErrors(errChan)
assert.Equal(map[string]errorSummary{
"ErrorSpanBufFull": errorSummary{
Count: 4,
Example: "span buffer is full (length: 1000)",
},
"ErrorTraceIDMismatch": errorSummary{
Count: 2,
Example: "trace ID mismatch (expected: 2a actual: fff)",
},
"ErrorFlushLostTraces": errorSummary{
Count: 1,
Example: "unable to flush traces, lost 42 traces",
},
}, errs)
}


@@ -1,73 +0,0 @@
package tracer_test
import (
"context"
"fmt"
"io"
"log"
"net/http"
"os"
"github.com/DataDog/dd-trace-go/tracer"
)
func saveFile(ctx context.Context, path string, r io.Reader) error {
// Start a new span that is the child of the span stored in the context, and
// attach it to the current context. If the context has no span, it will
// return an empty root span.
span, ctx := tracer.NewChildSpanWithContext("filestore.saveFile", ctx)
defer span.Finish()
// save the file contents.
file, err := os.Create(path)
if err != nil {
span.SetError(err)
return err
}
defer file.Close()
_, err = io.Copy(file, r)
span.SetError(err)
return err
}
func saveFileHandler(w http.ResponseWriter, r *http.Request) {
// the name of the operation we're measuring
name := "http.request"
service := "example-filestore"
resource := "/saveFile"
// This is the main entry point of our application, so we create a root span
// that includes the service and resource name.
span := tracer.NewRootSpan(name, service, resource)
defer span.Finish()
// Add the span to the request's context so we can pass the tracing information
// down the stack.
ctx := span.Context(r.Context())
// Do the work.
err := saveFile(ctx, "/tmp/example", r.Body)
span.SetError(err) // no-op if err == nil
if err != nil {
http.Error(w, fmt.Sprintf("error saving file! %s", err), 500)
return
}
w.Write([]byte("saved file!"))
}
// Tracing the hierarchy of spans in a request is a key part of tracing. This, for example,
// lets a developer associate all of the database calls in a web request. As of Go 1.7,
// the standard way of doing this is with the context package. Along with supporting
// deadlines, cancellation signals and more, Contexts are perfect for passing (optional)
// telemetry data through your stack.
//
// Read more about contexts here: https://golang.org/pkg/context/
//
// Here is an example illustrating how to pass tracing data with contexts.
func Example_context() {
http.HandleFunc("/saveFile", saveFileHandler)
log.Fatal(http.ListenAndServe(":8080", nil))
}


@@ -1,23 +0,0 @@
package tracer_test
import (
"net/http"
"github.com/DataDog/dd-trace-go/tracer"
)
func Example() {
span := tracer.NewRootSpan("http.client.request", "example.com", "/user/{id}")
defer span.Finish()
url := "http://example.com/user/123"
resp, err := http.Get(url)
if err != nil {
span.SetError(err)
return
}
span.SetMeta("http.status", resp.Status)
span.SetMeta("http.url", url)
}


@@ -1,289 +0,0 @@
package tracer
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/DataDog/dd-trace-go/tracer/ext"
)
func TestSpanStart(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// a new span sets the Start after the initialization
assert.NotEqual(int64(0), span.Start)
}
func TestSpanString(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// don't bother checking the contents, just make sure it works.
assert.NotEqual("", span.String())
span.Finish()
assert.NotEqual("", span.String())
}
func TestSpanSetMeta(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// check the map is properly initialized
span.SetMeta("status.code", "200")
assert.Equal("200", span.Meta["status.code"])
// operating on a finished span is a no-op
nMeta := len(span.Meta)
span.Finish()
span.SetMeta("finished.test", "true")
assert.Equal(len(span.Meta), nMeta)
assert.Equal(span.Meta["finished.test"], "")
}
func TestSpanSetMetas(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
span.SetSamplingPriority(0) // avoid interferences with "_sampling_priority_v1" meta
metas := map[string]string{
"error.msg": "Something wrong",
"error.type": "*errors.errorString",
"status.code": "200",
"system.pid": "29176",
}
extraMetas := map[string]string{
"custom.1": "something custom",
"custom.2": "something even more special",
}
nopMetas := map[string]string{
"nopKey1": "nopValue1",
"nopKey2": "nopValue2",
}
// check the map is properly initialized
span.SetMetas(metas)
assert.Equal(len(metas), len(span.Meta))
for k := range metas {
assert.Equal(metas[k], span.Meta[k])
}
// check a second call adds the new metas, but does not remove old ones
span.SetMetas(extraMetas)
assert.Equal(len(metas)+len(extraMetas), len(span.Meta))
for k := range extraMetas {
assert.Equal(extraMetas[k], span.Meta[k])
}
assert.Equal(span.Meta["status.code"], "200")
// operating on a finished span is a no-op
span.Finish()
span.SetMetas(nopMetas)
assert.Equal(len(metas)+len(extraMetas), len(span.Meta))
for k := range nopMetas {
assert.Equal("", span.Meta[k])
}
}
func TestSpanSetMetric(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// check the map is properly initialized
span.SetMetric("bytes", 1024.42)
assert.Equal(1, len(span.Metrics))
assert.Equal(1024.42, span.Metrics["bytes"])
// operating on a finished span is a no-op
span.Finish()
span.SetMetric("finished.test", 1337)
assert.Equal(1, len(span.Metrics))
assert.Equal(0.0, span.Metrics["finished.test"])
}
func TestSpanError(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// check the error is set in the default meta
err := errors.New("Something wrong")
span.SetError(err)
assert.Equal(int32(1), span.Error)
assert.Equal("Something wrong", span.Meta["error.msg"])
assert.Equal("*errors.errorString", span.Meta["error.type"])
assert.NotEqual("", span.Meta["error.stack"])
// operating on a finished span is a no-op
span = tracer.NewRootSpan("flask.request", "flask", "/")
nMeta := len(span.Meta)
span.Finish()
span.SetError(err)
assert.Equal(int32(0), span.Error)
assert.Equal(nMeta, len(span.Meta))
assert.Equal("", span.Meta["error.msg"])
assert.Equal("", span.Meta["error.type"])
assert.Equal("", span.Meta["error.stack"])
}
func TestSpanError_Typed(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// check the error is set in the default meta
err := &boomError{}
span.SetError(err)
assert.Equal(int32(1), span.Error)
assert.Equal("boom", span.Meta["error.msg"])
assert.Equal("*tracer.boomError", span.Meta["error.type"])
assert.NotEqual("", span.Meta["error.stack"])
}
func TestEmptySpan(t *testing.T) {
// ensure the empty span won't crash the app
var span Span
span.SetMeta("a", "b")
span.SetError(nil)
span.Finish()
var s *Span
s.SetMeta("a", "b")
s.SetError(nil)
s.Finish()
}
func TestSpanErrorNil(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// don't set the error if it's nil
nMeta := len(span.Meta)
span.SetError(nil)
assert.Equal(int32(0), span.Error)
assert.Equal(nMeta, len(span.Meta))
}
func TestSpanFinish(t *testing.T) {
assert := assert.New(t)
wait := time.Millisecond * 2
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// the finish should set finished and the duration
time.Sleep(wait)
span.Finish()
assert.True(span.Duration > int64(wait))
assert.True(span.finished)
}
func TestSpanFinishTwice(t *testing.T) {
assert := assert.New(t)
wait := time.Millisecond * 2
tracer, _ := getTestTracer()
defer tracer.Stop()
assert.Len(tracer.channels.trace, 0)
// the finish must be idempotent
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
time.Sleep(wait)
span.Finish()
assert.Len(tracer.channels.trace, 1)
previousDuration := span.Duration
time.Sleep(wait)
span.Finish()
assert.Equal(previousDuration, span.Duration)
assert.Len(tracer.channels.trace, 1)
}
func TestSpanContext(t *testing.T) {
ctx := context.Background()
_, ok := SpanFromContext(ctx)
assert.False(t, ok)
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
ctx = span.Context(ctx)
s2, ok := SpanFromContext(ctx)
assert.True(t, ok)
assert.Equal(t, span.SpanID, s2.SpanID)
}
// Prior to a bug fix, this failed when running `go test -race`
func TestSpanModifyWhileFlushing(t *testing.T) {
tracer, _ := getTestTracer()
defer tracer.Stop()
done := make(chan struct{})
go func() {
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
span.Finish()
// It doesn't make much sense to update the span after it's been finished,
// but an error in a user's code could lead to this.
span.SetMeta("race_test", "true")
span.SetMetric("race_test2", 133.7)
span.SetMetrics("race_test3", 133.7)
span.SetError(errors.New("t"))
done <- struct{}{}
}()
run := true
for run {
select {
case <-done:
run = false
default:
tracer.flushTraces()
}
}
}
func TestSpanSamplingPriority(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
span := tracer.NewRootSpan("my.name", "my.service", "my.resource")
assert.Equal(0.0, span.Metrics["_sampling_priority_v1"], "default sampling priority if undefined is 0")
assert.False(span.HasSamplingPriority(), "by default, sampling priority is undefined")
assert.Equal(0, span.GetSamplingPriority(), "default sampling priority for root spans is 0")
childSpan := tracer.NewChildSpan("my.child", span)
assert.Equal(span.Metrics["_sampling_priority_v1"], childSpan.Metrics["_sampling_priority_v1"])
assert.Equal(span.HasSamplingPriority(), childSpan.HasSamplingPriority())
assert.Equal(span.GetSamplingPriority(), childSpan.GetSamplingPriority())
for _, priority := range []int{
ext.PriorityUserReject,
ext.PriorityAutoReject,
ext.PriorityAutoKeep,
ext.PriorityUserKeep,
999, // not used yet, but we should allow it
} {
span.SetSamplingPriority(priority)
assert.True(span.HasSamplingPriority())
assert.Equal(priority, span.GetSamplingPriority())
childSpan = tracer.NewChildSpan("my.child", span)
assert.Equal(span.Metrics["_sampling_priority_v1"], childSpan.Metrics["_sampling_priority_v1"])
assert.Equal(span.HasSamplingPriority(), childSpan.HasSamplingPriority())
assert.Equal(span.GetSamplingPriority(), childSpan.GetSamplingPriority())
}
}
type boomError struct{}
func (e *boomError) Error() string { return "boom" }


@ -1,30 +0,0 @@
package tracer
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
)
func BenchmarkNormalTimeNow(b *testing.B) {
for n := 0; n < b.N; n++ {
lowPrecisionNow()
}
}
func BenchmarkHighPrecisionTime(b *testing.B) {
for n := 0; n < b.N; n++ {
highPrecisionNow()
}
}
func TestHighPrecisionTimerIsMoreAccurate(t *testing.T) {
startLow := lowPrecisionNow()
startHigh := highPrecisionNow()
stopHigh := highPrecisionNow()
for stopHigh == startHigh {
stopHigh = highPrecisionNow()
}
stopLow := lowPrecisionNow()
assert.Equal(t, int64(0), stopLow-startLow)
}


@@ -1,648 +0,0 @@
package tracer
import (
"context"
"fmt"
"github.com/DataDog/dd-trace-go/tracer/ext"
"net/http"
"os"
"strconv"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestDefaultTracer(t *testing.T) {
assert := assert.New(t)
var wg sync.WaitGroup
// the default client must be available
assert.NotNil(DefaultTracer)
// package free functions must proxy the calls to the
// default client
root := NewRootSpan("pylons.request", "pylons", "/")
NewChildSpan("pylons.request", root)
wg.Add(2)
go func() {
for i := 0; i < 1000; i++ {
Disable()
Enable()
}
wg.Done()
}()
go func() {
for i := 0; i < 1000; i++ {
_ = DefaultTracer.Enabled()
}
wg.Done()
}()
wg.Wait()
}
func TestNewSpan(t *testing.T) {
assert := assert.New(t)
// the tracer must create root spans
tracer := NewTracer()
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
assert.Equal(uint64(0), span.ParentID)
assert.Equal("pylons", span.Service)
assert.Equal("pylons.request", span.Name)
assert.Equal("/", span.Resource)
}
func TestNewSpanFromContextNil(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
child := tracer.NewChildSpanFromContext("abc", nil)
assert.Equal("abc", child.Name)
assert.Equal("", child.Service)
child = tracer.NewChildSpanFromContext("def", context.Background())
assert.Equal("def", child.Name)
assert.Equal("", child.Service)
}
func TestNewChildSpanWithContext(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
// nil context
span, ctx := tracer.NewChildSpanWithContext("abc", nil)
assert.Equal("abc", span.Name)
assert.Equal("", span.Service)
assert.Equal(span.ParentID, span.SpanID) // it should be a root span
assert.Equal(span.Tracer(), tracer)
// the returned ctx should contain the created span
assert.NotNil(ctx)
ctxSpan, ok := SpanFromContext(ctx)
assert.True(ok)
assert.Equal(span, ctxSpan)
// context without span
span, ctx = tracer.NewChildSpanWithContext("abc", context.Background())
assert.Equal("abc", span.Name)
assert.Equal("", span.Service)
assert.Equal(span.ParentID, span.SpanID) // it should be a root span
// the returned ctx should contain the created span
assert.NotNil(ctx)
ctxSpan, ok = SpanFromContext(ctx)
assert.True(ok)
assert.Equal(span, ctxSpan)
// context with span
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
parentCTX := ContextWithSpan(context.Background(), parent)
span, ctx = tracer.NewChildSpanWithContext("def", parentCTX)
assert.Equal("def", span.Name)
assert.Equal("pylons", span.Service)
assert.Equal(parent.Service, span.Service)
// the created span should be a child of the parent span
assert.Equal(span.ParentID, parent.SpanID)
// the returned ctx should contain the created span
assert.NotNil(ctx)
ctxSpan, ok = SpanFromContext(ctx)
assert.True(ok)
assert.Equal(ctxSpan, span)
}
func TestNewSpanFromContext(t *testing.T) {
assert := assert.New(t)
// the tracer must create child spans
tracer := NewTracer()
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
ctx := ContextWithSpan(context.Background(), parent)
child := tracer.NewChildSpanFromContext("redis.command", ctx)
// ids and services are inherited
assert.Equal(parent.SpanID, child.ParentID)
assert.Equal(parent.TraceID, child.TraceID)
assert.Equal(parent.Service, child.Service)
// the resource is not inherited and defaults to the name
assert.Equal("redis.command", child.Resource)
// the tracer instance is the same
assert.Equal(tracer, parent.tracer)
assert.Equal(tracer, child.tracer)
}
func TestNewSpanChild(t *testing.T) {
assert := assert.New(t)
// the tracer must create child spans
tracer := NewTracer()
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
child := tracer.NewChildSpan("redis.command", parent)
// ids and services are inherited
assert.Equal(parent.SpanID, child.ParentID)
assert.Equal(parent.TraceID, child.TraceID)
assert.Equal(parent.Service, child.Service)
// the resource is not inherited and defaults to the name
assert.Equal("redis.command", child.Resource)
// the tracer instance is the same
assert.Equal(tracer, parent.tracer)
assert.Equal(tracer, child.tracer)
}
func TestNewRootSpanHasPid(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
root := tracer.NewRootSpan("pylons.request", "pylons", "/")
assert.Equal(strconv.Itoa(os.Getpid()), root.GetMeta(ext.Pid))
}
func TestNewChildHasNoPid(t *testing.T) {
assert := assert.New(t)
tracer := NewTracer()
root := tracer.NewRootSpan("pylons.request", "pylons", "/")
child := tracer.NewChildSpan("redis.command", root)
assert.Equal("", child.GetMeta(ext.Pid))
}
func TestTracerDisabled(t *testing.T) {
assert := assert.New(t)
// disable the tracer and be sure that the span is not added
tracer := NewTracer()
tracer.SetEnabled(false)
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
span.Finish()
assert.Len(tracer.channels.trace, 0)
}
func TestTracerEnabledAgain(t *testing.T) {
assert := assert.New(t)
// disable the tracer and enable it again
tracer := NewTracer()
tracer.SetEnabled(false)
preSpan := tracer.NewRootSpan("pylons.request", "pylons", "/")
preSpan.Finish()
assert.Len(tracer.channels.trace, 0)
tracer.SetEnabled(true)
postSpan := tracer.NewRootSpan("pylons.request", "pylons", "/")
postSpan.Finish()
assert.Len(tracer.channels.trace, 1)
}
func TestTracerSampler(t *testing.T) {
assert := assert.New(t)
sampleRate := 0.5
tracer := NewTracer()
tracer.SetSampleRate(sampleRate)
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
// The span might be sampled or not, we don't know, but at least it should have the sample rate metric
assert.Equal(sampleRate, span.Metrics[sampleRateMetricKey])
}
func TestTracerEdgeSampler(t *testing.T) {
assert := assert.New(t)
// a sample rate of 0 should sample nothing
tracer0 := NewTracer()
tracer0.SetSampleRate(0)
// a sample rate of 1 should sample everything
tracer1 := NewTracer()
tracer1.SetSampleRate(1)
count := traceChanLen / 3
for i := 0; i < count; i++ {
span0 := tracer0.NewRootSpan("pylons.request", "pylons", "/")
span0.Finish()
span1 := tracer1.NewRootSpan("pylons.request", "pylons", "/")
span1.Finish()
}
assert.Len(tracer0.channels.trace, 0)
assert.Len(tracer1.channels.trace, count)
tracer0.Stop()
tracer1.Stop()
}
func TestTracerConcurrent(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
// Wait for three different goroutines that should create
// three different traces with one child each
var wg sync.WaitGroup
wg.Add(3)
go func() {
defer wg.Done()
tracer.NewRootSpan("pylons.request", "pylons", "/").Finish()
}()
go func() {
defer wg.Done()
tracer.NewRootSpan("pylons.request", "pylons", "/home").Finish()
}()
go func() {
defer wg.Done()
tracer.NewRootSpan("pylons.request", "pylons", "/trace").Finish()
}()
wg.Wait()
tracer.ForceFlush()
traces := transport.Traces()
assert.Len(traces, 3)
assert.Len(traces[0], 1)
assert.Len(traces[1], 1)
assert.Len(traces[2], 1)
}
func TestTracerParentFinishBeforeChild(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
// Testing an edge case: a child refers to a parent that is already closed.
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
parent.Finish()
tracer.ForceFlush()
traces := transport.Traces()
assert.Len(traces, 1)
assert.Len(traces[0], 1)
assert.Equal(parent, traces[0][0])
child := tracer.NewChildSpan("redis.command", parent)
child.Finish()
tracer.ForceFlush()
traces = transport.Traces()
assert.Len(traces, 1)
assert.Len(traces[0], 1)
assert.Equal(child, traces[0][0])
assert.Equal(parent.SpanID, traces[0][0].ParentID, "child should refer to parent, even if they have been flushed separately")
}
func TestTracerConcurrentMultipleSpans(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
// Wait for two different goroutines that should create
// two traces with two children each
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
child := tracer.NewChildSpan("redis.command", parent)
child.Finish()
parent.Finish()
}()
go func() {
defer wg.Done()
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
child := tracer.NewChildSpan("redis.command", parent)
child.Finish()
parent.Finish()
}()
wg.Wait()
tracer.ForceFlush()
traces := transport.Traces()
assert.Len(traces, 2)
assert.Len(traces[0], 2)
assert.Len(traces[1], 2)
}
func TestTracerAtomicFlush(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
// Make sure we don't flush partial bits of traces
root := tracer.NewRootSpan("pylons.request", "pylons", "/")
span := tracer.NewChildSpan("redis.command", root)
span1 := tracer.NewChildSpan("redis.command.1", span)
span2 := tracer.NewChildSpan("redis.command.2", span)
span.Finish()
span1.Finish()
span2.Finish()
tracer.ForceFlush()
traces := transport.Traces()
assert.Len(traces, 0, "nothing should be flushed now as span2 is not finished yet")
root.Finish()
tracer.ForceFlush()
traces = transport.Traces()
assert.Len(traces, 1)
assert.Len(traces[0], 4, "all spans should show up at once")
}
func TestTracerServices(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
tracer.SetServiceInfo("svc1", "a", "b")
tracer.SetServiceInfo("svc2", "c", "d")
tracer.SetServiceInfo("svc1", "e", "f")
tracer.Stop()
assert.Len(transport.services, 2)
svc1 := transport.services["svc1"]
assert.NotNil(svc1)
assert.Equal("svc1", svc1.Name)
assert.Equal("e", svc1.App)
assert.Equal("f", svc1.AppType)
svc2 := transport.services["svc2"]
assert.NotNil(svc2)
assert.Equal("svc2", svc2.Name)
assert.Equal("c", svc2.App)
assert.Equal("d", svc2.AppType)
}
func TestTracerServicesDisabled(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
tracer.SetEnabled(false)
tracer.SetServiceInfo("svc1", "a", "b")
tracer.Stop()
assert.Len(transport.services, 0)
}
func TestTracerMeta(t *testing.T) {
assert := assert.New(t)
var nilTracer *Tracer
nilTracer.SetMeta("key", "value")
assert.Nil(nilTracer.getAllMeta(), "nil tracer should return nil meta")
tracer, _ := getTestTracer()
defer tracer.Stop()
assert.Nil(tracer.getAllMeta(), "by default, no meta")
tracer.SetMeta("env", "staging")
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
assert.Equal("staging", span.GetMeta("env"))
assert.Equal("", span.GetMeta("component"))
span.Finish()
assert.Equal(map[string]string{"env": "staging"}, tracer.getAllMeta(), "there should be one meta")
tracer.SetMeta("component", "core")
span = tracer.NewRootSpan("pylons.request", "pylons", "/")
assert.Equal("staging", span.GetMeta("env"))
assert.Equal("core", span.GetMeta("component"))
span.Finish()
assert.Equal(map[string]string{"env": "staging", "component": "core"}, tracer.getAllMeta(), "there should be two entries")
tracer.SetMeta("env", "prod")
span = tracer.NewRootSpan("pylons.request", "pylons", "/")
assert.Equal("prod", span.GetMeta("env"))
assert.Equal("core", span.GetMeta("component"))
span.SetMeta("env", "sandbox")
assert.Equal("sandbox", span.GetMeta("env"))
assert.Equal("core", span.GetMeta("component"))
span.Finish()
assert.Equal(map[string]string{"env": "prod", "component": "core"}, tracer.getAllMeta(), "key1 should have been updated")
}
func TestTracerRace(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
total := (traceChanLen / 3) / 10
var wg sync.WaitGroup
wg.Add(total)
// Trying to be quite brutal here, firing lots of concurrent things, finishing in
// different orders, and modifying spans after creation.
for n := 0; n < total; n++ {
i := n // keep local copy
odd := ((i % 2) != 0)
go func() {
if i%11 == 0 {
time.Sleep(time.Microsecond)
}
tracer.SetMeta("foo", "bar")
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
NewChildSpan("redis.command", parent).Finish()
child := NewChildSpan("async.service", parent)
if i%13 == 0 {
time.Sleep(time.Microsecond)
}
if odd {
parent.SetMeta("odd", "true")
parent.SetMetric("oddity", 1)
parent.Finish()
} else {
child.SetMeta("odd", "false")
child.SetMetric("oddity", 0)
child.Finish()
}
if i%17 == 0 {
time.Sleep(time.Microsecond)
}
if odd {
child.Resource = "HGETALL"
child.SetMeta("odd", "false")
child.SetMetric("oddity", 0)
} else {
parent.Resource = "/" + strconv.Itoa(i) + ".html"
parent.SetMeta("odd", "true")
parent.SetMetric("oddity", 1)
}
if i%19 == 0 {
time.Sleep(time.Microsecond)
}
if odd {
child.Finish()
} else {
parent.Finish()
}
wg.Done()
}()
}
wg.Wait()
tracer.ForceFlush()
traces := transport.Traces()
assert.Len(traces, total, "we should have exactly as many traces as expected")
for _, trace := range traces {
assert.Len(trace, 3, "each trace should have exactly 3 spans")
var parent, child, redis *Span
for _, span := range trace {
assert.Equal("bar", span.GetMeta("foo"), "tracer meta should have been applied to all spans")
switch span.Name {
case "pylons.request":
parent = span
case "async.service":
child = span
case "redis.command":
redis = span
default:
assert.Fail("unexpected span", span)
}
}
assert.NotNil(parent)
assert.NotNil(child)
assert.NotNil(redis)
assert.Equal(uint64(0), parent.ParentID)
assert.Equal(parent.TraceID, parent.SpanID)
assert.Equal(parent.TraceID, redis.TraceID)
assert.Equal(parent.TraceID, child.TraceID)
assert.Equal(parent.TraceID, redis.ParentID)
assert.Equal(parent.TraceID, child.ParentID)
}
}
// TestWorker is inherently flaky, as here we test that the worker background
// task actually does flush things on its own. Most other tests use, and should
// use, ForceFlush() to make sure things are really sent to the transport.
// Here, we just wait until traces show up, as a real program would.
func TestWorker(t *testing.T) {
assert := assert.New(t)
tracer, transport := getTestTracer()
defer tracer.Stop()
n := traceChanLen * 10 // put more traces than the chan size, on purpose
for i := 0; i < n; i++ {
root := tracer.NewRootSpan("pylons.request", "pylons", "/")
child := tracer.NewChildSpan("redis.command", root)
child.Finish()
root.Finish()
}
now := time.Now()
count := 0
for time.Now().Before(now.Add(time.Minute)) && count < traceChanLen {
nbTraces := len(transport.Traces())
if nbTraces > 0 {
t.Logf("popped %d traces", nbTraces)
}
count += nbTraces
time.Sleep(time.Millisecond)
}
// Here we just check that we received "enough" traces. In practice, many of
// them are dropped; that is another interesting side effect of this test: it
// triggers error messages (which are repeated, so they get aggregated, etc.).
if count < traceChanLen {
assert.Fail(fmt.Sprintf("timeout, not enough traces in buffer (%d/%d)", count, n))
}
}
// BenchmarkConcurrentTracing tests the performance of spawning a lot of
// goroutines where each one creates a trace with a parent and a child.
func BenchmarkConcurrentTracing(b *testing.B) {
tracer, _ := getTestTracer()
defer tracer.Stop()
b.ResetTimer()
for n := 0; n < b.N; n++ {
go func() {
parent := tracer.NewRootSpan("pylons.request", "pylons", "/")
defer parent.Finish()
for i := 0; i < 10; i++ {
tracer.NewChildSpan("redis.command", parent).Finish()
}
}()
}
}
// BenchmarkTracerAddSpans tests the performance of creating and finishing a root
// span. It should include the encoding overhead.
func BenchmarkTracerAddSpans(b *testing.B) {
tracer, _ := getTestTracer()
defer tracer.Stop()
for n := 0; n < b.N; n++ {
span := tracer.NewRootSpan("pylons.request", "pylons", "/")
span.Finish()
}
}
// getTestTracer returns a Tracer backed by a dummyTransport
func getTestTracer() (*Tracer, *dummyTransport) {
transport := &dummyTransport{getEncoder: msgpackEncoderFactory}
tracer := NewTracerTransport(transport)
return tracer, transport
}
// Mock Transport with a real Encoder
type dummyTransport struct {
getEncoder encoderFactory
traces [][]*Span
services map[string]Service
sync.RWMutex // required because of some poll-testing (eg: worker)
}
func (t *dummyTransport) SendTraces(traces [][]*Span) (*http.Response, error) {
t.Lock()
t.traces = append(t.traces, traces...)
t.Unlock()
encoder := t.getEncoder()
return nil, encoder.EncodeTraces(traces)
}
func (t *dummyTransport) SendServices(services map[string]Service) (*http.Response, error) {
t.Lock()
t.services = services
t.Unlock()
encoder := t.getEncoder()
return nil, encoder.EncodeServices(services)
}
func (t *dummyTransport) Traces() [][]*Span {
t.Lock()
defer t.Unlock()
traces := t.traces
t.traces = nil
return traces
}
func (t *dummyTransport) SetHeader(key, value string) {}

@@ -1,202 +0,0 @@
@@ -1,202 +0,0 @@
package tracer
import (
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// getTestSpan returns a Span with different fields set
func getTestSpan() *Span {
return &Span{
TraceID: 42,
SpanID: 52,
ParentID: 42,
Type: "web",
Service: "high.throughput",
Name: "sending.events",
Resource: "SEND /data",
Start: 1481215590883401105,
Duration: 1000000000,
Meta: map[string]string{"http.host": "192.168.0.1"},
Metrics: map[string]float64{"http.monitor": 41.99},
}
}
// getTestTrace returns a list of ``traceN`` traces, each composed of ``size``
// spans.
func getTestTrace(traceN, size int) [][]*Span {
var traces [][]*Span
for i := 0; i < traceN; i++ {
trace := []*Span{}
for j := 0; j < size; j++ {
trace = append(trace, getTestSpan())
}
traces = append(traces, trace)
}
return traces
}
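// getTestServices returns a small set of services used by the transport tests.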
func getTestServices() map[string]Service {
return map[string]Service{
"svc1": Service{Name: "svc1", App: "a", AppType: "b"},
"svc2": Service{Name: "svc2", App: "c", AppType: "d"},
}
}
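// mockDatadogAPIHandler asserts that each incoming request carries a valid,
// non-zero X-Datadog-Trace-Count header.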
type mockDatadogAPIHandler struct {
t *testing.T
}
func (m mockDatadogAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
assert := assert.New(m.t)
header := r.Header.Get("X-Datadog-Trace-Count")
assert.NotEqual("", header, "X-Datadog-Trace-Count header should be here")
count, err := strconv.Atoi(header)
assert.Nil(err, "header should be an int")
assert.NotEqual(0, count, "there should be a non-zero amount of traces")
}
func mockDatadogAPINewServer(t *testing.T) *httptest.Server {
handler := mockDatadogAPIHandler{t: t}
server := httptest.NewServer(handler)
return server
}
func TestTracesAgentIntegration(t *testing.T) {
assert := assert.New(t)
testCases := []struct {
payload [][]*Span
}{
{getTestTrace(1, 1)},
{getTestTrace(10, 1)},
{getTestTrace(1, 10)},
{getTestTrace(10, 10)},
}
for _, tc := range testCases {
transport := newHTTPTransport(defaultHostname, defaultPort)
response, err := transport.SendTraces(tc.payload)
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
}
func TestAPIDowngrade(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
transport.traceURL = "http://localhost:8126/v0.0/traces"
// if we get a 404 we should downgrade the API
traces := getTestTrace(2, 2)
response, err := transport.SendTraces(traces)
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
func TestEncoderDowngrade(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
transport.traceURL = "http://localhost:8126/v0.2/traces"
// if we get a 415 because of a wrong encoder, we should downgrade the encoder
traces := getTestTrace(2, 2)
response, err := transport.SendTraces(traces)
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
func TestTransportServices(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
response, err := transport.SendServices(getTestServices())
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
func TestTransportServicesDowngrade_0_0(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
transport.serviceURL = "http://localhost:8126/v0.0/services"
response, err := transport.SendServices(getTestServices())
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
func TestTransportServicesDowngrade_0_2(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
transport.serviceURL = "http://localhost:8126/v0.2/services"
response, err := transport.SendServices(getTestServices())
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
func TestTransportEncoderPool(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
// MsgpackEncoder is the default encoder of the pool
encoder := transport.getEncoder()
assert.Equal("application/msgpack", encoder.ContentType())
}
func TestTransportSwitchEncoder(t *testing.T) {
assert := assert.New(t)
transport := newHTTPTransport(defaultHostname, defaultPort)
transport.changeEncoder(jsonEncoderFactory)
// after changing the encoder, the JSON encoder should be returned
encoder := transport.getEncoder()
assert.Equal("application/json", encoder.ContentType())
}
func TestTraceCountHeader(t *testing.T) {
assert := assert.New(t)
testCases := []struct {
payload [][]*Span
}{
{getTestTrace(1, 1)},
{getTestTrace(10, 1)},
{getTestTrace(100, 10)},
}
receiver := mockDatadogAPINewServer(t)
parsedURL, err := url.Parse(receiver.URL)
assert.NoError(err)
host := parsedURL.Host
hostItems := strings.Split(host, ":")
assert.Equal(2, len(hostItems), "port should be given, as it's chosen randomly")
hostname := hostItems[0]
port := hostItems[1]
for _, tc := range testCases {
transport := newHTTPTransport(hostname, port)
response, err := transport.SendTraces(tc.payload)
assert.NoError(err)
assert.NotNil(response)
assert.Equal(200, response.StatusCode)
}
receiver.Close()
}

@@ -1,34 +0,0 @@
package sarama
import "testing"
var (
aclCreateRequest = []byte{
0, 0, 0, 1,
3, // resource type = group
0, 5, 'g', 'r', 'o', 'u', 'p',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
2, // all
2, // deny
}
)
func TestCreateAclsRequest(t *testing.T) {
req := &CreateAclsRequest{
AclCreations: []*AclCreation{{
Resource: Resource{
ResourceType: AclResourceGroup,
ResourceName: "group",
},
Acl: Acl{
Principal: "principal",
Host: "host",
Operation: AclOperationAll,
PermissionType: AclPermissionDeny,
}},
},
}
testRequest(t, "create request", req, aclCreateRequest)
}

@@ -1,41 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
createResponseWithError = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 42,
0, 5, 'e', 'r', 'r', 'o', 'r',
}
createResponseArray = []byte{
0, 0, 0, 100,
0, 0, 0, 2,
0, 42,
0, 5, 'e', 'r', 'r', 'o', 'r',
0, 0,
255, 255,
}
)
func TestCreateAclsResponse(t *testing.T) {
errmsg := "error"
resp := &CreateAclsResponse{
ThrottleTime: 100 * time.Millisecond,
AclCreationResponses: []*AclCreationResponse{{
Err: ErrInvalidRequest,
ErrMsg: &errmsg,
}},
}
testResponse(t, "response with error", resp, createResponseWithError)
resp.AclCreationResponses = append(resp.AclCreationResponses, new(AclCreationResponse))
testResponse(t, "response array", resp, createResponseArray)
}

@@ -1,69 +0,0 @@
package sarama
import "testing"
var (
aclDeleteRequestNulls = []byte{
0, 0, 0, 1, // 1 filter
1,          // any resource type
255, 255,   // null resource name
255, 255,   // null principal
255, 255,   // null host
11,         // alter configs
3,          // allow
}
aclDeleteRequest = []byte{
0, 0, 0, 1,
1, // any
0, 6, 'f', 'i', 'l', 't', 'e', 'r',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
}
aclDeleteRequestArray = []byte{
0, 0, 0, 2,
1,
0, 6, 'f', 'i', 'l', 't', 'e', 'r',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
2,
0, 5, 't', 'o', 'p', 'i', 'c',
255, 255,
255, 255,
6,
2,
}
)
func TestDeleteAclsRequest(t *testing.T) {
req := &DeleteAclsRequest{
Filters: []*AclFilter{{
ResourceType: AclResourceAny,
Operation: AclOperationAlterConfigs,
PermissionType: AclPermissionAllow,
}},
}
testRequest(t, "delete request nulls", req, aclDeleteRequestNulls)
req.Filters[0].ResourceName = nullString("filter")
req.Filters[0].Principal = nullString("principal")
req.Filters[0].Host = nullString("host")
req.Filters[0].Operation = AclOperationWrite
testRequest(t, "delete request", req, aclDeleteRequest)
req.Filters = append(req.Filters, &AclFilter{
ResourceType: AclResourceTopic,
ResourceName: nullString("topic"),
Operation: AclOperationDelete,
PermissionType: AclPermissionDeny,
})
testRequest(t, "delete request array", req, aclDeleteRequestArray)
}

@@ -1,38 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
deleteAclsResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 0, // no error
255, 255, // no error message
0, 0, 0, 1, // 1 matching acl
0, 0, // no error
255, 255, // no error message
2, // resource type
0, 5, 't', 'o', 'p', 'i', 'c',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4,
3,
}
)
func TestDeleteAclsResponse(t *testing.T) {
resp := &DeleteAclsResponse{
ThrottleTime: 100 * time.Millisecond,
FilterResponses: []*FilterResponse{{
MatchingAcls: []*MatchingAcl{{
Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "topic"},
Acl: Acl{Principal: "principal", Host: "host", Operation: AclOperationWrite, PermissionType: AclPermissionAllow},
}},
}},
}
testResponse(t, "", resp, deleteAclsResponse)
}

@@ -1,35 +0,0 @@
package sarama
import (
"testing"
)
var (
aclDescribeRequest = []byte{
2, // resource type
0, 5, 't', 'o', 'p', 'i', 'c',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
5, // acl operation
3, // acl permission type
}
)
func TestAclDescribeRequest(t *testing.T) {
resourcename := "topic"
principal := "principal"
host := "host"
req := &DescribeAclsRequest{
AclFilter{
ResourceType: AclResourceTopic,
ResourceName: &resourcename,
Principal: &principal,
Host: &host,
Operation: AclOperationCreate,
PermissionType: AclPermissionAllow,
},
}
testRequest(t, "", req, aclDescribeRequest)
}

@@ -1,45 +0,0 @@
package sarama
import (
"testing"
"time"
)
var aclDescribeResponseError = []byte{
0, 0, 0, 100,
0, 8, // error
0, 5, 'e', 'r', 'r', 'o', 'r',
0, 0, 0, 1, // 1 resource
2, // resource type (topic)
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 acl
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
}
func TestAclDescribeResponse(t *testing.T) {
errmsg := "error"
resp := &DescribeAclsResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrBrokerNotAvailable,
ErrMsg: &errmsg,
ResourceAcls: []*ResourceAcls{{
Resource: Resource{
ResourceName: "topic",
ResourceType: AclResourceTopic,
},
Acls: []*Acl{
{
Principal: "principal",
Host: "host",
Operation: AclOperationWrite,
PermissionType: AclPermissionAllow,
},
},
}},
}
testResponse(t, "describe", resp, aclDescribeResponseError)
}

@@ -1,23 +0,0 @@
package sarama
import "testing"
var (
addOffsetsToTxnRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 0, 0, 0, 31, 64,
0, 0,
0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd',
}
)
func TestAddOffsetsToTxnRequest(t *testing.T) {
req := &AddOffsetsToTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 0,
GroupID: "groupid",
}
testRequest(t, "", req, addOffsetsToTxnRequest)
}

@@ -1,22 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
addOffsetsToTxnResponse = []byte{
0, 0, 0, 100,
0, 47,
}
)
func TestAddOffsetsToTxnResponse(t *testing.T) {
resp := &AddOffsetsToTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrInvalidProducerEpoch,
}
testResponse(t, "", resp, addOffsetsToTxnResponse)
}

@@ -1,27 +0,0 @@
package sarama
import "testing"
var (
addPartitionsToTxnRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 0, 0, 0, 31, 64, // ProducerID
0, 0, 0, 0, // ProducerEpoch
0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, 0, 0, 0, 1,
}
)
func TestAddPartitionsToTxnRequest(t *testing.T) {
req := &AddPartitionsToTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 0,
TopicPartitions: map[string][]int32{
"topic": []int32{1},
},
}
testRequest(t, "", req, addPartitionsToTxnRequest)
}

@@ -1,31 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
addPartitionsToTxnResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition error
0, 0, 0, 2, // partition 2
0, 48, // error
}
)
func TestAddPartitionsToTxnResponse(t *testing.T) {
resp := &AddPartitionsToTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Errors: map[string][]*PartitionError{
"topic": []*PartitionError{&PartitionError{
Err: ErrInvalidTxnState,
Partition: 2,
}},
},
}
testResponse(t, "", resp, addPartitionsToTxnResponse)
}

@@ -1,86 +0,0 @@
package sarama
import "testing"
var (
emptyAlterConfigsRequest = []byte{
0, 0, 0, 0, // 0 configs
0, // don't Validate
}
singleAlterConfigsRequest = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, //1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
0, // don't validate
}
doubleAlterConfigsRequest = []byte{
0, 0, 0, 2, // 2 configs
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, //1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
2, // a topic
0, 3, 'b', 'a', 'r', // topic name: bar
0, 0, 0, 1, //1 config name
0, 12, // 12 chars
'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
0, // don't validate
}
)
func TestAlterConfigsRequest(t *testing.T) {
var request *AlterConfigsRequest
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{},
}
testRequest(t, "no requests", request, emptyAlterConfigsRequest)
configValue := "1000"
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{
&AlterConfigsResource{
Type: TopicResource,
Name: "foo",
ConfigEntries: map[string]*string{
"segment.ms": &configValue,
},
},
},
}
testRequest(t, "one config", request, singleAlterConfigsRequest)
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{
&AlterConfigsResource{
Type: TopicResource,
Name: "foo",
ConfigEntries: map[string]*string{
"segment.ms": &configValue,
},
},
&AlterConfigsResource{
Type: TopicResource,
Name: "bar",
ConfigEntries: map[string]*string{
"retention.ms": &configValue,
},
},
},
}
testRequest(t, "two configs", request, doubleAlterConfigsRequest)
}

@@ -1,45 +0,0 @@
package sarama
import (
"testing"
)
var (
alterResponseEmpty = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 0, // no configs
}
alterResponsePopulated = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 1, // response
0, 0, // error code
0, 0, // empty error message
2, // topic
0, 3, 'f', 'o', 'o',
}
)
func TestAlterConfigsResponse(t *testing.T) {
var response *AlterConfigsResponse
response = &AlterConfigsResponse{
Resources: []*AlterConfigsResourceResponse{},
}
testVersionDecodable(t, "empty", response, alterResponseEmpty, 0)
if len(response.Resources) != 0 {
t.Error("Expected no groups")
}
response = &AlterConfigsResponse{
Resources: []*AlterConfigsResourceResponse{
&AlterConfigsResourceResponse{
ErrorCode: 0,
ErrorMsg: "",
Type: TopicResource,
Name: "foo",
},
},
}
testResponse(t, "response with error", response, alterResponsePopulated)
}

@@ -1,14 +0,0 @@
package sarama
import "testing"
var (
apiVersionRequest = []byte{}
)
func TestApiVersionsRequest(t *testing.T) {
var request *ApiVersionsRequest
request = new(ApiVersionsRequest)
testRequest(t, "basic", request, apiVersionRequest)
}

@@ -1,32 +0,0 @@
package sarama
import "testing"
var (
apiVersionResponse = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x03,
0x00, 0x02,
0x00, 0x01,
}
)
func TestApiVersionsResponse(t *testing.T) {
var response *ApiVersionsResponse
response = new(ApiVersionsResponse)
testVersionDecodable(t, "no error", response, apiVersionResponse, 0)
if response.Err != ErrNoError {
t.Error("Decoding error failed: no error expected but found", response.Err)
}
if response.ApiVersions[0].ApiKey != 0x03 {
t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey)
}
if response.ApiVersions[0].MinVersion != 0x02 {
t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion)
}
if response.ApiVersions[0].MaxVersion != 0x01 {
t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion)
}
}

@@ -1,845 +0,0 @@
package sarama
import (
"errors"
"log"
"os"
"os/signal"
"sync"
"testing"
"time"
)
const TestMessage = "ABC THE MESSAGE"
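// closeProducer shuts the producer down via AsyncClose and drains its
// Successes and Errors channels, reporting anything left over to the test.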
func closeProducer(t *testing.T, p AsyncProducer) {
var wg sync.WaitGroup
p.AsyncClose()
wg.Add(2)
go func() {
for range p.Successes() {
t.Error("Unexpected message on Successes()")
}
wg.Done()
}()
go func() {
for msg := range p.Errors() {
t.Error(msg.Err)
}
wg.Done()
}()
wg.Wait()
}
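// expectResults reads from the producer's Errors and Successes channels until
// the expected number of successes and errors have been seen, failing the test
// on any mismatch.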
func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
expect := successes + errors
for expect > 0 {
select {
case msg := <-p.Errors():
if msg.Msg.flags != 0 {
t.Error("Message had flags set")
}
errors--
expect--
if errors < 0 {
t.Error(msg.Err)
}
case msg := <-p.Successes():
if msg.flags != 0 {
t.Error("Message had flags set")
}
successes--
expect--
if successes < 0 {
t.Error("Too many successes")
}
}
}
if successes != 0 || errors != 0 {
t.Error("Unexpected successes", successes, "or errors", errors)
}
}
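// testPartitioner is a Partitioner that is fed partition numbers through a
// channel; feeding a nil value makes Partition return an error.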
type testPartitioner chan *int32
func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
part := <-p
if part == nil {
return 0, errors.New("BOOM")
}
return *part, nil
}
func (p testPartitioner) RequiresConsistency() bool {
return true
}
func (p testPartitioner) feed(partition int32) {
p <- &partition
}
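// flakyEncoder encodes TestMessage successfully when true and fails with an
// error when false.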
type flakyEncoder bool
func (f flakyEncoder) Length() int {
return len(TestMessage)
}
func (f flakyEncoder) Encode() ([]byte, error) {
if !bool(f) {
return nil, errors.New("flaky encoding error")
}
return []byte(TestMessage), nil
}
func TestAsyncProducer(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
}
for i := 0; i < 10; i++ {
select {
case msg := <-producer.Errors():
t.Error(msg.Err)
if msg.Msg.flags != 0 {
t.Error("Message had flags set")
}
case msg := <-producer.Successes():
if msg.flags != 0 {
t.Error("Message had flags set")
}
if msg.Metadata.(int) != i {
t.Error("Message metadata did not match")
}
case <-time.After(time.Second):
t.Errorf("Timeout waiting for msg #%d", i)
goto done
}
}
done:
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerMultipleFlushes(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for flush := 0; flush < 3; flush++ {
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 5, 0)
}
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerMultipleBrokers(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader0 := NewMockBroker(t, 2)
leader1 := NewMockBroker(t, 3)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodResponse0 := new(ProduceResponse)
prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
leader0.Returns(prodResponse0)
prodResponse1 := new(ProduceResponse)
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
leader1.Returns(prodResponse1)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
config.Producer.Partitioner = NewRoundRobinPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 10, 0)
closeProducer(t, producer)
leader1.Close()
leader0.Close()
seedBroker.Close()
}
func TestAsyncProducerCustomPartitioner(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodResponse := new(ProduceResponse)
prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodResponse)
config := NewConfig()
config.Producer.Flush.Messages = 2
config.Producer.Return.Successes = true
config.Producer.Partitioner = func(topic string) Partitioner {
p := make(testPartitioner)
go func() {
p.feed(0)
p <- nil
p <- nil
p <- nil
p.feed(0)
}()
return p
}
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
expectResults(t, producer, 2, 3)
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
func TestAsyncProducerFailureRetry(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader1 := NewMockBroker(t, 2)
leader2 := NewMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader1.Returns(prodNotLeader)
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
leader1.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader1.Close()
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader2.Close()
closeProducer(t, producer)
}
func TestAsyncProducerEncoderFailures(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
leader.Returns(prodSuccess)
config := NewConfig()
config.Producer.Flush.Messages = 1
config.Producer.Return.Successes = true
config.Producer.Partitioner = NewManualPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for flush := 0; flush < 3; flush++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)}
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)}
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)}
expectResults(t, producer, 1, 2)
}
closeProducer(t, producer)
leader.Close()
seedBroker.Close()
}
// If a Kafka broker becomes unavailable and then comes back into service, the
// producer reconnects to it and continues sending messages.
func TestAsyncProducerBrokerBounce(t *testing.T) {
// Given
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
leaderAddr := leader.Addr()
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
config := NewConfig()
config.Producer.Flush.Messages = 1
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// When: a broker connection gets reset by a broker (network glitch, restart, you name it).
leader.Close() // producer should get EOF
leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
// Then: a produced message goes through the new broker connection.
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader1 := NewMockBroker(t, 2)
leader2 := NewMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 3
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader1.Close() // producer should get EOF
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
// ok fine, tell it to go to leader2 finally
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader2.Close()
closeProducer(t, producer)
}
func TestAsyncProducerMultipleRetries(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader1 := NewMockBroker(t, 2)
leader2 := NewMockBroker(t, 3)
metadataLeader1 := new(MetadataResponse)
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader1)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Max = 4
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader1.Returns(prodNotLeader)
metadataLeader2 := new(MetadataResponse)
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader2)
leader2.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader1)
leader1.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader1)
leader1.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader2)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
leader2.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader1.Close()
leader2.Close()
closeProducer(t, producer)
}
func TestAsyncProducerOutOfRetries(t *testing.T) {
t.Skip("Enable once bug #294 is fixed.")
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
for i := 0; i < 10; i++ {
select {
case msg := <-producer.Errors():
if msg.Err != ErrNotLeaderForPartition {
t.Error(msg.Err)
}
case <-producer.Successes():
t.Error("Unexpected success")
}
}
seedBroker.Returns(metadataResponse)
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
leader.Close()
seedBroker.Close()
safeClose(t, producer)
}
func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
leaderAddr := leader.Addr()
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 1
config.Producer.Partitioner = NewRoundRobinPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// prime partition 0
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// prime partition 1
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
prodSuccess = new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// reboot the broker (the producer will get EOF on its existing connection)
leader.Close()
leader = NewMockBrokerAddr(t, 2, leaderAddr)
// send another message on partition 0 to trigger the EOF and retry
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
// tell partition 0 to go to that broker again
seedBroker.Returns(metadataResponse)
// succeed this time
prodSuccess = new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 1, 0)
// shutdown
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Producer.Flush.Messages = 5
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
config.Producer.Retry.Max = 1
config.Producer.Partitioner = NewManualPartitioner
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// prime partitions
for p := int32(0); p < 2; p++ {
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
}
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 5, 0)
}
// send more messages on partition 0
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
time.Sleep(50 * time.Millisecond)
leader.SetHandlerByMap(map[string]MockResponse{
"ProduceRequest": NewMockProduceResponse(t).
SetVersion(0).
SetError("my_topic", 0, ErrNoError),
})
// tell partition 0 to go to that broker again
seedBroker.Returns(metadataResponse)
// succeed this time
expectResults(t, producer, 5, 0)
// put five more through
for i := 0; i < 5; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
}
expectResults(t, producer, 5, 0)
// shutdown
closeProducer(t, producer)
seedBroker.Close()
leader.Close()
}
func TestAsyncProducerRetryShutdown(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataLeader := new(MetadataResponse)
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = true
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
producer.AsyncClose()
time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in
producer.Input() <- &ProducerMessage{Topic: "FOO"}
if err := <-producer.Errors(); err.Err != ErrShuttingDown {
t.Error(err)
}
prodNotLeader := new(ProduceResponse)
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
leader.Returns(prodNotLeader)
seedBroker.Returns(metadataLeader)
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
expectResults(t, producer, 10, 0)
seedBroker.Close()
leader.Close()
// wait for the async-closed producer to shut down fully
for err := range producer.Errors() {
t.Error(err)
}
}
func TestAsyncProducerNoReturns(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
metadataLeader := new(MetadataResponse)
metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataLeader)
config := NewConfig()
config.Producer.Flush.Messages = 10
config.Producer.Return.Successes = false
config.Producer.Return.Errors = false
config.Producer.Retry.Backoff = 0
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
}
wait := make(chan bool)
go func() {
if err := producer.Close(); err != nil {
t.Error(err)
}
close(wait)
}()
prodSuccess := new(ProduceResponse)
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
leader.Returns(prodSuccess)
<-wait
seedBroker.Close()
leader.Close()
}
// This example shows how to use the producer while simultaneously
// reading the Errors channel to know about any failures.
func ExampleAsyncProducer_select() {
producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
if err != nil {
panic(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Fatalln(err)
}
}()
// Trap SIGINT to trigger a shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
var enqueued, errors int
ProducerLoop:
for {
select {
case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
enqueued++
case err := <-producer.Errors():
log.Println("Failed to produce message", err)
errors++
case <-signals:
break ProducerLoop
}
}
log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
// This example shows how to use the producer with separate goroutines
// reading from the Successes and Errors channels. Note that in order
// for the Successes channel to be populated, you have to set
// config.Producer.Return.Successes to true.
func ExampleAsyncProducer_goroutines() {
config := NewConfig()
config.Producer.Return.Successes = true
producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
if err != nil {
panic(err)
}
// Trap SIGINT to trigger a graceful shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
var (
wg sync.WaitGroup
enqueued, successes, errors int
)
wg.Add(1)
go func() {
defer wg.Done()
for range producer.Successes() {
successes++
}
}()
wg.Add(1)
go func() {
defer wg.Done()
for err := range producer.Errors() {
log.Println(err)
errors++
}
}()
ProducerLoop:
for {
message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
select {
case producer.Input() <- message:
enqueued++
case <-signals:
producer.AsyncClose() // Trigger a shutdown of the producer.
break ProducerLoop
}
}
wg.Wait()
log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
}

@@ -1,358 +0,0 @@
package sarama
import (
"fmt"
"testing"
"time"
)
func ExampleBroker() {
broker := NewBroker("localhost:9092")
err := broker.Open(nil)
if err != nil {
panic(err)
}
request := MetadataRequest{Topics: []string{"myTopic"}}
response, err := broker.GetMetadata(&request)
if err != nil {
_ = broker.Close()
panic(err)
}
fmt.Println("There are", len(response.Topics), "topics active in the cluster.")
if err = broker.Close(); err != nil {
panic(err)
}
}
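// mockEncoder is a request body stub that encodes to a fixed byte slice.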
type mockEncoder struct {
bytes []byte
}
func (m mockEncoder) encode(pe packetEncoder) error {
return pe.putRawBytes(m.bytes)
}
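// brokerMetrics holds the byte counts reported by the mock broker's notifier
// callback.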
type brokerMetrics struct {
bytesRead int
bytesWritten int
}
func TestBrokerAccessors(t *testing.T) {
broker := NewBroker("abc:123")
if broker.ID() != -1 {
t.Error("New broker didn't have an ID of -1.")
}
if broker.Addr() != "abc:123" {
t.Error("New broker didn't have the correct address")
}
broker.id = 34
if broker.ID() != 34 {
t.Error("Manually setting broker ID did not take effect.")
}
}
func TestSimpleBrokerCommunication(t *testing.T) {
for _, tt := range brokerTestTable {
Logger.Printf("Testing broker communication for %s", tt.name)
mb := NewMockBroker(t, 0)
mb.Returns(&mockEncoder{tt.response})
pendingNotify := make(chan brokerMetrics)
// Register a callback to be notified about successful requests
mb.SetNotifier(func(bytesRead, bytesWritten int) {
pendingNotify <- brokerMetrics{bytesRead, bytesWritten}
})
broker := NewBroker(mb.Addr())
// Set the broker id in order to validate local broker metrics
broker.id = 0
conf := NewConfig()
conf.Version = tt.version
err := broker.Open(conf)
if err != nil {
t.Fatal(err)
}
tt.runner(t, broker)
// Wait up to 500 ms for the remote broker to process the request and
// notify us about the metrics
timeout := 500 * time.Millisecond
select {
case mockBrokerMetrics := <-pendingNotify:
validateBrokerMetrics(t, broker, mockBrokerMetrics)
case <-time.After(timeout):
t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout)
}
mb.Close()
err = broker.Close()
if err != nil {
t.Error(err)
}
}
}
// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
var brokerTestTable = []struct {
version KafkaVersion
name string
response []byte
runner func(*testing.T, *Broker)
}{
{V0_10_0_0,
"MetadataRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := MetadataRequest{}
response, err := broker.GetMetadata(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Metadata request got no response!")
}
}},
{V0_10_0_0,
"ConsumerMetadataRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ConsumerMetadataRequest{}
response, err := broker.GetConsumerMetadata(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Consumer Metadata request got no response!")
}
}},
{V0_10_0_0,
"ProduceRequest (NoResponse)",
[]byte{},
func(t *testing.T, broker *Broker) {
request := ProduceRequest{}
request.RequiredAcks = NoResponse
response, err := broker.Produce(&request)
if err != nil {
t.Error(err)
}
if response != nil {
t.Error("Produce request with NoResponse got a response!")
}
}},
{V0_10_0_0,
"ProduceRequest (WaitForLocal)",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ProduceRequest{}
request.RequiredAcks = WaitForLocal
response, err := broker.Produce(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Produce request without NoResponse got no response!")
}
}},
{V0_10_0_0,
"FetchRequest",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := FetchRequest{}
response, err := broker.Fetch(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Fetch request got no response!")
}
}},
{V0_10_0_0,
"OffsetFetchRequest",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetFetchRequest{}
response, err := broker.FetchOffset(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("OffsetFetch request got no response!")
}
}},
{V0_10_0_0,
"OffsetCommitRequest",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetCommitRequest{}
response, err := broker.CommitOffset(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("OffsetCommit request got no response!")
}
}},
{V0_10_0_0,
"OffsetRequest",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := OffsetRequest{}
response, err := broker.GetAvailableOffsets(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Offset request got no response!")
}
}},
{V0_10_0_0,
"JoinGroupRequest",
[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := JoinGroupRequest{}
response, err := broker.JoinGroup(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("JoinGroup request got no response!")
}
}},
{V0_10_0_0,
"SyncGroupRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := SyncGroupRequest{}
response, err := broker.SyncGroup(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("SyncGroup request got no response!")
}
}},
{V0_10_0_0,
"LeaveGroupRequest",
[]byte{0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := LeaveGroupRequest{}
response, err := broker.LeaveGroup(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("LeaveGroup request got no response!")
}
}},
{V0_10_0_0,
"HeartbeatRequest",
[]byte{0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := HeartbeatRequest{}
response, err := broker.Heartbeat(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("Heartbeat request got no response!")
}
}},
{V0_10_0_0,
"ListGroupsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ListGroupsRequest{}
response, err := broker.ListGroups(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("ListGroups request got no response!")
}
}},
{V0_10_0_0,
"DescribeGroupsRequest",
[]byte{0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := DescribeGroupsRequest{}
response, err := broker.DescribeGroups(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("DescribeGroups request got no response!")
}
}},
{V0_10_0_0,
"ApiVersionsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := ApiVersionsRequest{}
response, err := broker.ApiVersions(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("ApiVersions request got no response!")
}
}},
{V1_1_0_0,
"DeleteGroupsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := DeleteGroupsRequest{}
response, err := broker.DeleteGroups(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("DeleteGroups request got no response!")
}
}},
}
func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
metricValidators := newMetricValidators()
mockBrokerBytesRead := mockBrokerMetrics.bytesRead
mockBrokerBytesWritten := mockBrokerMetrics.bytesWritten
// Check that the number of bytes sent corresponds to what the mock broker received
metricValidators.registerForAllBrokers(broker, countMeterValidator("incoming-byte-rate", mockBrokerBytesWritten))
if mockBrokerBytesWritten == 0 {
// This is a ProduceRequest with NoResponse
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 0))
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 0))
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", 0, 0))
} else {
metricValidators.registerForAllBrokers(broker, countMeterValidator("response-rate", 1))
metricValidators.registerForAllBrokers(broker, countHistogramValidator("response-size", 1))
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("response-size", mockBrokerBytesWritten, mockBrokerBytesWritten))
}
// Check that the number of bytes received corresponds to what the mock broker sent
metricValidators.registerForAllBrokers(broker, countMeterValidator("outgoing-byte-rate", mockBrokerBytesRead))
metricValidators.registerForAllBrokers(broker, countMeterValidator("request-rate", 1))
metricValidators.registerForAllBrokers(broker, countHistogramValidator("request-size", 1))
metricValidators.registerForAllBrokers(broker, minMaxHistogramValidator("request-size", mockBrokerBytesRead, mockBrokerBytesRead))
// Run the validators
metricValidators.run(t, broker.conf.MetricRegistry)
}


@@ -1,660 +0,0 @@
package sarama
import (
"io"
"sync"
"testing"
"time"
)
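// safeClose closes c and reports any close error on the test handle.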
func safeClose(t testing.TB, c io.Closer) {
err := c.Close()
if err != nil {
t.Error(err)
}
}
func TestSimpleClient(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
seedBroker.Returns(new(MetadataResponse))
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
safeClose(t, client)
}
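// All-partition and writable-partition results should be cached separately,
// and later lookups should be served from the cache.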
func TestCachedPartitions(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
replicas := []int32{3, 1, 5}
isr := []int32{5, 1}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker("localhost:12345", 2)
metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
c, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
client := c.(*client)
// Verify they aren't cached the same
allP := client.cachedPartitionsResults["my_topic"][allPartitions]
writeP := client.cachedPartitionsResults["my_topic"][writablePartitions]
if len(allP) == len(writeP) {
t.Fatal("Invalid lengths!")
}
tmp := client.cachedPartitionsResults["my_topic"]
// Verify we actually use the cache at all!
tmp[allPartitions] = []int32{1, 2, 3, 4}
client.cachedPartitionsResults["my_topic"] = tmp
if 4 != len(client.cachedPartitions("my_topic", allPartitions)) {
t.Fatal("Not using the cache!")
}
seedBroker.Close()
safeClose(t, client)
}
func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
replicas := []int32{seedBroker.BrokerID()}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse)
partitions, err := client.Partitions("unknown")
if err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if partitions != nil {
t.Errorf("Should return nil as partition list, found %v", partitions)
}
// Should still use the cache of a known topic
partitions, err = client.Partitions("my_topic")
if err != nil {
t.Errorf("Expected no error, found %v", err)
}
metadataResponse = new(MetadataResponse)
metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse)
// Should not use cache for unknown topic
partitions, err = client.Partitions("unknown")
if err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if partitions != nil {
t.Errorf("Should return nil as partition list, found %v", partitions)
}
seedBroker.Close()
safeClose(t, client)
}
func TestClientSeedBrokers(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker("localhost:12345", 2)
seedBroker.Returns(metadataResponse)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
seedBroker.Close()
safeClose(t, client)
}
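// Topics, partitions, leaders, replicas, and ISRs should be reported exactly
// as returned by the broker, without reordering.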
func TestClientMetadata(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 5)
replicas := []int32{3, 1, 5}
isr := []int32{5, 1}
metadataResponse := new(MetadataResponse)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError)
metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable)
seedBroker.Returns(metadataResponse)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
topics, err := client.Topics()
if err != nil {
t.Error(err)
} else if len(topics) != 1 || topics[0] != "my_topic" {
t.Error("Client returned incorrect topics:", topics)
}
parts, err := client.Partitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 {
t.Error("Client returned incorrect partitions for my_topic:", parts)
}
parts, err = client.WritablePartitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 1 || parts[0] != 0 {
t.Error("Client returned incorrect writable partitions for my_topic:", parts)
}
tst, err := client.Leader("my_topic", 0)
if err != nil {
t.Error(err)
} else if tst.ID() != 5 {
t.Error("Leader for my_topic had incorrect ID.")
}
replicas, err = client.Replicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if replicas[0] != 3 {
t.Error("Incorrect (or sorted) replica")
} else if replicas[1] != 1 {
t.Error("Incorrect (or sorted) replica")
} else if replicas[2] != 5 {
t.Error("Incorrect (or sorted) replica")
}
isr, err = client.InSyncReplicas("my_topic", 0)
if err != nil {
t.Error(err)
} else if len(isr) != 2 {
t.Error("Client returned incorrect ISRs for partition:", isr)
} else if isr[0] != 5 {
t.Error("Incorrect (or sorted) ISR:", isr)
} else if isr[1] != 1 {
t.Error("Incorrect (or sorted) ISR:", isr)
}
leader.Close()
seedBroker.Close()
safeClose(t, client)
}
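// GetOffset should return the newest offset and continue to work after the
// partition leader is restarted at the same address.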
func TestClientGetOffset(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 2)
leaderAddr := leader.Addr()
metadata := new(MetadataResponse)
metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError)
metadata.AddBroker(leaderAddr, leader.BrokerID())
seedBroker.Returns(metadata)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
offsetResponse := new(OffsetResponse)
offsetResponse.AddTopicPartition("foo", 0, 123)
leader.Returns(offsetResponse)
offset, err := client.GetOffset("foo", 0, OffsetNewest)
if err != nil {
t.Error(err)
}
if offset != 123 {
t.Error("Unexpected offset, got ", offset)
}
leader.Close()
seedBroker.Returns(metadata)
leader = NewMockBrokerAddr(t, 2, leaderAddr)
offsetResponse = new(OffsetResponse)
offsetResponse.AddTopicPartition("foo", 0, 456)
leader.Returns(offsetResponse)
offset, err = client.GetOffset("foo", 0, OffsetNewest)
if err != nil {
t.Error(err)
}
if offset != 456 {
t.Error("Unexpected offset, got ", offset)
}
seedBroker.Close()
leader.Close()
safeClose(t, client)
}
func TestClientReceivingUnknownTopic(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
metadataResponse1 := new(MetadataResponse)
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
metadataUnknownTopic := new(MetadataResponse)
metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataUnknownTopic)
seedBroker.Returns(metadataUnknownTopic)
if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition {
t.Error("ErrUnknownTopicOrPartition expected, got", err)
}
// If we are asking for the leader of a partition of a non-existent topic,
// we will request metadata again.
seedBroker.Returns(metadataUnknownTopic)
seedBroker.Returns(metadataUnknownTopic)
if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
safeClose(t, client)
seedBroker.Close()
}
func TestClientReceivingPartialMetadata(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 5)
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}
metadataPartial := new(MetadataResponse)
metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError)
metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable)
seedBroker.Returns(metadataPartial)
if err := client.RefreshMetadata("new_topic"); err != nil {
t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error")
}
// Even though the metadata was incomplete, we should be able to get the leader of a partition
// for which we did get a useful response, without doing additional requests.
partition0Leader, err := client.Leader("new_topic", 0)
if err != nil {
t.Error(err)
} else if partition0Leader.Addr() != leader.Addr() {
t.Error("Unexpected leader returned", partition0Leader.Addr())
}
// If we are asking for the leader of a partition that didn't have a leader before,
// we will do another metadata request.
seedBroker.Returns(metadataPartial)
// Still no leader for the partition, so asking for it should return an error.
_, err = client.Leader("new_topic", 1)
if err != ErrLeaderNotAvailable {
t.Error("Expected ErrLeaderNotAvailable, got", err)
}
safeClose(t, client)
seedBroker.Close()
leader.Close()
}
func TestClientRefreshBehaviour(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
leader := NewMockBroker(t, 5)
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
seedBroker.Returns(metadataResponse1)
metadataResponse2 := new(MetadataResponse)
metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError)
seedBroker.Returns(metadataResponse2)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
parts, err := client.Partitions("my_topic")
if err != nil {
t.Error(err)
} else if len(parts) != 1 || parts[0] != 0xb {
t.Error("Client returned incorrect partitions for my_topic:", parts)
}
tst, err := client.Leader("my_topic", 0xb)
if err != nil {
t.Error(err)
} else if tst.ID() != 5 {
t.Error("Leader for my_topic had incorrect ID.")
}
leader.Close()
seedBroker.Close()
safeClose(t, client)
}
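// When a metadata refresh runs out of live seed brokers, previously dead
// seeds should be retried and the live/dead seed lists updated accordingly.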
func TestClientResurrectDeadSeeds(t *testing.T) {
initialSeed := NewMockBroker(t, 0)
emptyMetadata := new(MetadataResponse)
initialSeed.Returns(emptyMetadata)
conf := NewConfig()
conf.Metadata.Retry.Backoff = 0
conf.Metadata.RefreshFrequency = 0
c, err := NewClient([]string{initialSeed.Addr()}, conf)
if err != nil {
t.Fatal(err)
}
initialSeed.Close()
client := c.(*client)
seed1 := NewMockBroker(t, 1)
seed2 := NewMockBroker(t, 2)
seed3 := NewMockBroker(t, 3)
addr1 := seed1.Addr()
addr2 := seed2.Addr()
addr3 := seed3.Addr()
// Overwrite the seed brokers with a fixed ordering to make this test deterministic.
safeClose(t, client.seedBrokers[0])
client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)}
client.deadSeeds = []*Broker{}
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
if err := client.RefreshMetadata(); err != nil {
t.Error(err)
}
wg.Done()
}()
seed1.Close()
seed2.Close()
seed1 = NewMockBrokerAddr(t, 1, addr1)
seed2 = NewMockBrokerAddr(t, 2, addr2)
seed3.Close()
seed1.Close()
seed2.Returns(emptyMetadata)
wg.Wait()
if len(client.seedBrokers) != 2 {
t.Error("incorrect number of live seeds")
}
if len(client.deadSeeds) != 1 {
t.Error("incorrect number of dead seeds")
}
safeClose(t, c)
}
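// Controller() should return the controller broker on Kafka >= 0.10.0.0 and
// ErrControllerNotAvailable on older versions.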
func TestClientController(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
defer seedBroker.Close()
controllerBroker := NewMockBroker(t, 2)
defer controllerBroker.Close()
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetController(controllerBroker.BrokerID()).
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()),
})
cfg := NewConfig()
// test kafka version greater than 0.10.0.0
cfg.Version = V0_10_0_0
client1, err := NewClient([]string{seedBroker.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, client1)
broker, err := client1.Controller()
if err != nil {
t.Fatal(err)
}
if broker.Addr() != controllerBroker.Addr() {
t.Errorf("Expected controller to have address %s, found %s", controllerBroker.Addr(), broker.Addr())
}
// test kafka version earlier than 0.10.0.0
cfg.Version = V0_9_0_1
client2, err := NewClient([]string{seedBroker.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, client2)
if _, err = client2.Controller(); err != ErrControllerNotAvailable {
t.Errorf("Expected Contoller() to return %s, found %s", ErrControllerNotAvailable, err)
}
}
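// Coordinator lookup should retry while the coordinator is unavailable, cache
// the result, and pick up a new coordinator after RefreshCoordinator.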
func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
staleCoordinator := NewMockBroker(t, 2)
freshCoordinator := NewMockBroker(t, 3)
replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()}
metadataResponse1 := new(MetadataResponse)
metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID())
metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID())
metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse1)
client, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
coordinatorResponse1 := new(ConsumerMetadataResponse)
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
seedBroker.Returns(coordinatorResponse1)
coordinatorResponse2 := new(ConsumerMetadataResponse)
coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID()
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
coordinatorResponse2.CoordinatorPort = staleCoordinator.Port()
seedBroker.Returns(coordinatorResponse2)
broker, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if staleCoordinator.Addr() != broker.Addr() {
t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr())
}
if staleCoordinator.BrokerID() != broker.ID() {
t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID())
}
// Grab the cached value
broker2, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if broker2.Addr() != broker.Addr() {
t.Errorf("Expected the coordinator to be the same, but found %s vs. %s", broker2.Addr(), broker.Addr())
}
coordinatorResponse3 := new(ConsumerMetadataResponse)
coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID()
coordinatorResponse3.CoordinatorHost = "127.0.0.1"
coordinatorResponse3.CoordinatorPort = freshCoordinator.Port()
seedBroker.Returns(coordinatorResponse3)
// Refresh the locally cached value because it's stale
if err := client.RefreshCoordinator("my_group"); err != nil {
t.Error(err)
}
// Grab the fresh value
broker3, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if broker3.Addr() != freshCoordinator.Addr() {
t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr())
}
freshCoordinator.Close()
staleCoordinator.Close()
seedBroker.Close()
safeClose(t, client)
}
func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
coordinator := NewMockBroker(t, 2)
metadataResponse1 := new(MetadataResponse)
seedBroker.Returns(metadataResponse1)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 0
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
coordinatorResponse1 := new(ConsumerMetadataResponse)
coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable
seedBroker.Returns(coordinatorResponse1)
metadataResponse2 := new(MetadataResponse)
metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition)
seedBroker.Returns(metadataResponse2)
replicas := []int32{coordinator.BrokerID()}
metadataResponse3 := new(MetadataResponse)
metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError)
seedBroker.Returns(metadataResponse3)
coordinatorResponse2 := new(ConsumerMetadataResponse)
coordinatorResponse2.CoordinatorID = coordinator.BrokerID()
coordinatorResponse2.CoordinatorHost = "127.0.0.1"
coordinatorResponse2.CoordinatorPort = coordinator.Port()
seedBroker.Returns(coordinatorResponse2)
broker, err := client.Coordinator("my_group")
if err != nil {
t.Error(err)
}
if coordinator.Addr() != broker.Addr() {
t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr())
}
if coordinator.BrokerID() != broker.ID() {
t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID())
}
coordinator.Close()
seedBroker.Close()
safeClose(t, client)
}
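// Closing the client while the background metadata refresh is still running
// should not race or panic.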
func TestClientAutorefreshShutdownRace(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
metadataResponse := new(MetadataResponse)
seedBroker.Returns(metadataResponse)
conf := NewConfig()
conf.Metadata.RefreshFrequency = 100 * time.Millisecond
client, err := NewClient([]string{seedBroker.Addr()}, conf)
if err != nil {
t.Fatal(err)
}
// Wait for the background refresh to kick in
time.Sleep(110 * time.Millisecond)
done := make(chan none)
go func() {
// Close the client
if err := client.Close(); err != nil {
t.Fatal(err)
}
close(done)
}()
// Wait for the Close to kick in
time.Sleep(10 * time.Millisecond)
// Then return some metadata to the still-running background thread
leader := NewMockBroker(t, 2)
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError)
seedBroker.Returns(metadataResponse)
<-done
seedBroker.Close()
// give the update time to happen so we get a panic if it's still running (which it shouldn't)
time.Sleep(10 * time.Millisecond)
}


@@ -1,206 +0,0 @@
package sarama
import (
"math/big"
"net"
"testing"
"time"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
)
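// A throwaway CA, server certificate, and client certificate are generated,
// and several client TLS configurations are exercised against a TLS listener;
// only the fully correct configuration should succeed.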
func TestTLS(t *testing.T) {
cakey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
clientkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
hostkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
nvb := time.Now().Add(-1 * time.Hour)
nva := time.Now().Add(1 * time.Hour)
caTemplate := &x509.Certificate{
Subject: pkix.Name{CommonName: "ca"},
Issuer: pkix.Name{CommonName: "ca"},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
IsCA: true,
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageCertSign,
}
caDer, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &cakey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
caFinalCert, err := x509.ParseCertificate(caDer)
if err != nil {
t.Fatal(err)
}
hostDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
Subject: pkix.Name{CommonName: "host"},
Issuer: pkix.Name{CommonName: "ca"},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}, caFinalCert, &hostkey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
clientDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
Subject: pkix.Name{CommonName: "client"},
Issuer: pkix.Name{CommonName: "ca"},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}, caFinalCert, &clientkey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
pool := x509.NewCertPool()
pool.AddCert(caFinalCert)
systemCerts, err := x509.SystemCertPool()
if err != nil {
t.Fatal(err)
}
// Keep server the same - it's the client that we're testing
serverTLSConfig := &tls.Config{
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{hostDer},
PrivateKey: hostkey,
}},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: pool,
}
for _, tc := range []struct {
Succeed bool
Server, Client *tls.Config
}{
{ // Verify client fails if wrong CA cert pool is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: systemCerts,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if wrong key is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: hostkey,
}},
},
},
{ // Verify client fails if wrong cert is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{hostDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if no CAs are specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if no keys are specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
},
},
{ // Finally, verify it all works happily with client and server cert in place
Succeed: true,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
} {
doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client)
}
}
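// doListenerTLSTest starts a mock broker behind a TLS listener using
// serverConfig and checks whether a client configured with clientConfig
// connects as expected.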
func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientConfig *tls.Config) {
serverConfig.BuildNameToCertificate()
clientConfig.BuildNameToCertificate()
seedListener, err := tls.Listen("tcp", "127.0.0.1:0", serverConfig)
if err != nil {
t.Fatal("cannot open listener", err)
}
var childT *testing.T
if expectSuccess {
childT = t
} else {
childT = &testing.T{} // we want to swallow errors
}
seedBroker := NewMockBrokerListener(childT, 1, seedListener)
defer seedBroker.Close()
seedBroker.Returns(new(MetadataResponse))
config := NewConfig()
config.Net.TLS.Enable = true
config.Net.TLS.Config = clientConfig
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err == nil {
safeClose(t, client)
}
if expectSuccess {
if err != nil {
t.Fatal(err)
}
} else {
if err == nil {
t.Fatal("expected failure")
}
}
}


@@ -1,233 +0,0 @@
package sarama
import (
"os"
"testing"
"github.com/rcrowley/go-metrics"
)
func TestDefaultConfigValidates(t *testing.T) {
config := NewConfig()
if err := config.Validate(); err != nil {
t.Error(err)
}
if config.MetricRegistry == nil {
t.Error("Expected non nil metrics.MetricRegistry, got nil")
}
}
func TestInvalidClientIDConfigValidates(t *testing.T) {
config := NewConfig()
config.ClientID = "foo:bar"
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
t.Error("Expected invalid ClientID, got ", err)
}
}
func TestEmptyClientIDConfigValidates(t *testing.T) {
config := NewConfig()
config.ClientID = ""
if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" {
t.Error("Expected invalid ClientID, got ", err)
}
}
func TestNetConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"OpenRequests",
func(cfg *Config) {
cfg.Net.MaxOpenRequests = 0
},
"Net.MaxOpenRequests must be > 0"},
{"DialTimeout",
func(cfg *Config) {
cfg.Net.DialTimeout = 0
},
"Net.DialTimeout must be > 0"},
{"ReadTimeout",
func(cfg *Config) {
cfg.Net.ReadTimeout = 0
},
"Net.ReadTimeout must be > 0"},
{"WriteTimeout",
func(cfg *Config) {
cfg.Net.WriteTimeout = 0
},
"Net.WriteTimeout must be > 0"},
{"KeepAlive",
func(cfg *Config) {
cfg.Net.KeepAlive = -1
},
"Net.KeepAlive must be >= 0"},
{"SASL.User",
func(cfg *Config) {
cfg.Net.SASL.Enable = true
cfg.Net.SASL.User = ""
},
"Net.SASL.User must not be empty when SASL is enabled"},
{"SASL.Password",
func(cfg *Config) {
cfg.Net.SASL.Enable = true
cfg.Net.SASL.User = "user"
cfg.Net.SASL.Password = ""
},
"Net.SASL.Password must not be empty when SASL is enabled"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestMetadataConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"Retry.Max",
func(cfg *Config) {
cfg.Metadata.Retry.Max = -1
},
"Metadata.Retry.Max must be >= 0"},
{"Retry.Backoff",
func(cfg *Config) {
cfg.Metadata.Retry.Backoff = -1
},
"Metadata.Retry.Backoff must be >= 0"},
{"RefreshFrequency",
func(cfg *Config) {
cfg.Metadata.RefreshFrequency = -1
},
"Metadata.RefreshFrequency must be >= 0"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestProducerConfigValidates(t *testing.T) {
tests := []struct {
name string
cfg func(*Config) // resorting to using a function as a param because of internal composite structs
err string
}{
{
"MaxMessageBytes",
func(cfg *Config) {
cfg.Producer.MaxMessageBytes = 0
},
"Producer.MaxMessageBytes must be > 0"},
{"RequiredAcks",
func(cfg *Config) {
cfg.Producer.RequiredAcks = -2
},
"Producer.RequiredAcks must be >= -1"},
{"Timeout",
func(cfg *Config) {
cfg.Producer.Timeout = 0
},
"Producer.Timeout must be > 0"},
{"Partitioner",
func(cfg *Config) {
cfg.Producer.Partitioner = nil
},
"Producer.Partitioner must not be nil"},
{"Flush.Bytes",
func(cfg *Config) {
cfg.Producer.Flush.Bytes = -1
},
"Producer.Flush.Bytes must be >= 0"},
{"Flush.Messages",
func(cfg *Config) {
cfg.Producer.Flush.Messages = -1
},
"Producer.Flush.Messages must be >= 0"},
{"Flush.Frequency",
func(cfg *Config) {
cfg.Producer.Flush.Frequency = -1
},
"Producer.Flush.Frequency must be >= 0"},
{"Flush.MaxMessages",
func(cfg *Config) {
cfg.Producer.Flush.MaxMessages = -1
},
"Producer.Flush.MaxMessages must be >= 0"},
{"Flush.MaxMessages with Producer.Flush.Messages",
func(cfg *Config) {
cfg.Producer.Flush.MaxMessages = 1
cfg.Producer.Flush.Messages = 2
},
"Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set"},
{"Flush.Retry.Max",
func(cfg *Config) {
cfg.Producer.Retry.Max = -1
},
"Producer.Retry.Max must be >= 0"},
{"Flush.Retry.Backoff",
func(cfg *Config) {
cfg.Producer.Retry.Backoff = -1
},
"Producer.Retry.Backoff must be >= 0"},
}
for i, test := range tests {
c := NewConfig()
test.cfg(c)
if err := c.Validate(); string(err.(ConfigurationError)) != test.err {
t.Errorf("[%d]:[%s] Expected %s, Got %s\n", i, test.name, test.err, err)
}
}
}
func TestLZ4ConfigValidation(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionLZ4
if err := config.Validate(); string(err.(ConfigurationError)) != "lz4 compression requires Version >= V0_10_0_0" {
t.Error("Expected invalid lz4/kakfa version error, got ", err)
}
config.Version = V0_10_0_0
if err := config.Validate(); err != nil {
t.Error("Expected lz4 to work, got ", err)
}
}
// This example shows how to integrate with an existing registry as well as publishing metrics
// on the standard output
func ExampleConfig_metrics() {
// Our application registry
appMetricRegistry := metrics.NewRegistry()
appGauge := metrics.GetOrRegisterGauge("m1", appMetricRegistry)
appGauge.Update(1)
config := NewConfig()
// Use a prefix registry instead of the default local one
config.MetricRegistry = metrics.NewPrefixedChildRegistry(appMetricRegistry, "sarama.")
// Simulate a metric created by sarama without starting a broker
saramaGauge := metrics.GetOrRegisterGauge("m2", config.MetricRegistry)
saramaGauge.Update(2)
metrics.WriteOnce(appMetricRegistry, os.Stdout)
// Output:
// gauge m1
// value: 1
// gauge sarama.m2
// value: 2
}


@@ -1,73 +0,0 @@
package sarama
import (
"bytes"
"reflect"
"testing"
)
var (
groupMemberMetadata = []byte{
0, 1, // Version
0, 0, 0, 2, // Topic array length
0, 3, 'o', 'n', 'e', // Topic one
0, 3, 't', 'w', 'o', // Topic two
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
}
groupMemberAssignment = []byte{
0, 1, // Version
0, 0, 0, 1, // Topic array length
0, 3, 'o', 'n', 'e', // Topic one
0, 0, 0, 3, // Topic one, partition array length
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4
0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata
}
)
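// Consumer group member metadata should encode to the expected wire format
// and decode back to an equal struct.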
func TestConsumerGroupMemberMetadata(t *testing.T) {
meta := &ConsumerGroupMemberMetadata{
Version: 1,
Topics: []string{"one", "two"},
UserData: []byte{0x01, 0x02, 0x03},
}
buf, err := encode(meta, nil)
if err != nil {
t.Error("Failed to encode data", err)
} else if !bytes.Equal(groupMemberMetadata, buf) {
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf)
}
meta2 := new(ConsumerGroupMemberMetadata)
err = decode(buf, meta2)
if err != nil {
t.Error("Failed to decode data", err)
} else if !reflect.DeepEqual(meta, meta2) {
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2)
}
}
func TestConsumerGroupMemberAssignment(t *testing.T) {
amt := &ConsumerGroupMemberAssignment{
Version: 1,
Topics: map[string][]int32{
"one": {0, 2, 4},
},
UserData: []byte{0x01, 0x02, 0x03},
}
buf, err := encode(amt, nil)
if err != nil {
t.Error("Failed to encode data", err)
} else if !bytes.Equal(groupMemberAssignment, buf) {
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf)
}
amt2 := new(ConsumerGroupMemberAssignment)
err = decode(buf, amt2)
if err != nil {
t.Error("Failed to decode data", err)
} else if !reflect.DeepEqual(amt, amt2) {
t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2)
}
}


@@ -1,23 +0,0 @@
package sarama
import (
"testing"
)
var (
consumerMetadataRequestEmpty = []byte{
0x00, 0x00}
consumerMetadataRequestString = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'}
)
func TestConsumerMetadataRequest(t *testing.T) {
request := new(ConsumerMetadataRequest)
testEncodable(t, "empty string", request, consumerMetadataRequestEmpty)
testVersionDecodable(t, "empty string", request, consumerMetadataRequestEmpty, 0)
request.ConsumerGroup = "foobar"
testEncodable(t, "with string", request, consumerMetadataRequestString)
testVersionDecodable(t, "with string", request, consumerMetadataRequestString, 0)
}


@@ -1,44 +0,0 @@
package sarama
import "testing"
var (
consumerMetadataResponseError = []byte{
0x00, 0x0E,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
consumerMetadataResponseSuccess = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0xAB,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0xCC, 0xDD}
)
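// A consumer metadata response carrying an error code should encode and
// decode back to the same error.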
func TestConsumerMetadataResponseError(t *testing.T) {
response := &ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
testEncodable(t, "", response, consumerMetadataResponseError)
decodedResp := &ConsumerMetadataResponse{}
if err := versionedDecode(consumerMetadataResponseError, decodedResp, 0); err != nil {
t.Error("could not decode: ", err)
}
if decodedResp.Err != ErrOffsetsLoadInProgress {
t.Errorf("got %s, want %s", decodedResp.Err, ErrOffsetsLoadInProgress)
}
}
func TestConsumerMetadataResponseSuccess(t *testing.T) {
broker := NewBroker("foo:52445")
broker.id = 0xAB
response := ConsumerMetadataResponse{
Coordinator: broker,
CoordinatorID: 0xAB,
CoordinatorHost: "foo",
CoordinatorPort: 0xCCDD,
Err: ErrNoError,
}
testResponse(t, "success", &response, consumerMetadataResponseSuccess)
}


@@ -1,985 +0,0 @@
package sarama
import (
"log"
"os"
"os/signal"
"sync"
"testing"
"time"
)
var testMsg = StringEncoder("Foo")
// If a particular offset is provided then messages are consumed starting from
// that offset.
func TestConsumerOffsetManual(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
mockFetchResponse := NewMockFetchResponse(t, 1)
for i := 0; i < 10; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg)
}
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 2345),
"FetchRequest": mockFetchResponse,
})
// When
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
consumer, err := master.ConsumePartition("my_topic", 0, 1234)
if err != nil {
t.Fatal(err)
}
// Then: messages starting from offset 1234 are consumed.
for i := 0; i < 10; i++ {
select {
case message := <-consumer.Messages():
assertMessageOffset(t, message, int64(i+1234))
case err := <-consumer.Errors():
t.Error(err)
}
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If `OffsetNewest` is passed as the initial offset then the first consumed
// message indeed corresponds to the offset that the broker claims to be the
// newest in its metadata response.
func TestConsumerOffsetNewest(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 10).
SetOffset("my_topic", 0, OffsetOldest, 7),
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 9, testMsg).
SetMessage("my_topic", 0, 10, testMsg).
SetMessage("my_topic", 0, 11, testMsg).
SetHighWaterMark("my_topic", 0, 14),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest)
if err != nil {
t.Fatal(err)
}
// Then
assertMessageOffset(t, <-consumer.Messages(), 10)
if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 {
t.Errorf("Expected high water mark offset 14, found %d", hwmo)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// It is possible to close a partition consumer and then create it anew.
func TestConsumerRecreate(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 10, testMsg),
})
c, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, 10)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 10)
// When
safeClose(t, pc)
pc, err = c.ConsumePartition("my_topic", 0, 10)
if err != nil {
t.Fatal(err)
}
// Then
assertMessageOffset(t, <-pc.Messages(), 10)
safeClose(t, pc)
safeClose(t, c)
broker0.Close()
}
// An attempt to consume the same partition twice should fail.
func TestConsumerDuplicate(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": NewMockFetchResponse(t, 1),
})
config := NewConfig()
config.ChannelBufferSize = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc1, err := c.ConsumePartition("my_topic", 0, 0)
if err != nil {
t.Fatal(err)
}
// When
pc2, err := c.ConsumePartition("my_topic", 0, 0)
// Then
if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") {
t.Fatal("A partition cannot be consumed twice at the same time")
}
safeClose(t, pc1)
safeClose(t, c)
broker0.Close()
}
// If the consumer fails to refresh metadata it keeps retrying with the
// frequency specified by `Config.Consumer.Retry.Backoff`.
func TestConsumerLeaderRefreshError(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 100)
// Stage 1: my_topic/0 served by broker0
Logger.Printf(" STAGE 1")
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 123).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 123, testMsg),
})
config := NewConfig()
config.Net.ReadTimeout = 100 * time.Millisecond
config.Consumer.Retry.Backoff = 200 * time.Millisecond
config.Consumer.Return.Errors = true
config.Metadata.Retry.Max = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 123)
// Stage 2: broker0 says that it is no longer the leader for my_topic/0,
// but the requests to retrieve metadata fail with network timeout.
Logger.Printf(" STAGE 2")
fetchResponse2 := &FetchResponse{}
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": NewMockWrapper(fetchResponse2),
})
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
t.Errorf("Unexpected error: %v", consErr.Err)
}
// Stage 3: finally the metadata returned by broker0 tells that broker1 is
// a new leader for my_topic/0. Consumption resumes.
Logger.Printf(" STAGE 3")
broker1 := NewMockBroker(t, 101)
broker1.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 124, testMsg),
})
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetBroker(broker1.Addr(), broker1.BrokerID()).
SetLeader("my_topic", 0, broker1.BrokerID()),
})
assertMessageOffset(t, <-pc.Messages(), 124)
safeClose(t, pc)
safeClose(t, c)
broker1.Close()
broker0.Close()
}
func TestConsumerInvalidTopic(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 100)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()),
})
c, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
// Then
if pc != nil || err != ErrUnknownTopicOrPartition {
t.Errorf("Should fail with, err=%v", err)
}
safeClose(t, c)
broker0.Close()
}
// Nothing bad happens if a partition consumer that has no leader assigned at
// the moment is closed.
func TestConsumerClosePartitionWithoutLeader(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 100)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 123).
SetOffset("my_topic", 0, OffsetNewest, 1000),
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 123, testMsg),
})
config := NewConfig()
config.Net.ReadTimeout = 100 * time.Millisecond
config.Consumer.Retry.Backoff = 100 * time.Millisecond
config.Consumer.Return.Errors = true
config.Metadata.Retry.Max = 0
c, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-pc.Messages(), 123)
// broker0 says that it is no longer the leader for my_topic/0, but the
// requests to retrieve metadata fail with network timeout.
fetchResponse2 := &FetchResponse{}
fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": NewMockWrapper(fetchResponse2),
})
// When
if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers {
t.Errorf("Unexpected error: %v", consErr.Err)
}
// Then: the partition consumer can be closed without any problem.
safeClose(t, pc)
safeClose(t, c)
broker0.Close()
}
// If the initial offset passed on partition consumer creation is out of the
// actual offset range for the partition, then the partition consumer stops
// immediately closing its output channels.
func TestConsumerShutsDownOutOfRange(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse := new(FetchResponse)
fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 7),
"FetchRequest": NewMockWrapper(fetchResponse),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 101)
if err != nil {
t.Fatal(err)
}
// Then: consumer should shut down closing its messages and errors channels.
if _, ok := <-consumer.Messages(); ok {
t.Error("Expected the consumer to shut down")
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// If a fetch response contains messages with offsets that are smaller than
// requested, then such messages are ignored.
func TestConsumerExtraOffsets(t *testing.T) {
// Given
legacyFetchResponse := &FetchResponse{}
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 1)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 2)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 3)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 4)
newFetchResponse := &FetchResponse{Version: 4}
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 1)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
newFetchResponse.SetLastOffsetDelta("my_topic", 0, 4)
newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16
cfg := NewConfig()
cfg.Consumer.Return.Errors = true
if fetchResponse1.Version >= 4 {
cfg.Version = V0_11_0_0
offsetResponseVersion = 1
}
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{}
fetchResponse2.Version = fetchResponse1.Version
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(offsetResponseVersion).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
select {
case msg := <-consumer.Messages():
assertMessageOffset(t, msg, 3)
case err := <-consumer.Errors():
t.Fatal(err)
}
select {
case msg := <-consumer.Messages():
assertMessageOffset(t, msg, 4)
case err := <-consumer.Errors():
t.Fatal(err)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
}
func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
// Given
fetchResponse1 := &FetchResponse{Version: 4}
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
cfg := NewConfig()
cfg.Version = V0_11_0_0
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{}
fetchResponse2.Version = 4
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(1).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
assertMessageOffset(t, <-consumer.Messages(), 1)
assertMessageOffset(t, <-consumer.Messages(), 2)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
// It is fine if offsets of fetched messages are not sequential (although they
// must still be strictly increasing!).
func TestConsumerNonSequentialOffsets(t *testing.T) {
// Given
legacyFetchResponse := &FetchResponse{}
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 5)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 7)
legacyFetchResponse.AddMessage("my_topic", 0, nil, testMsg, 11)
newFetchResponse := &FetchResponse{Version: 4}
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
newFetchResponse.SetLastOffsetDelta("my_topic", 0, 11)
newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16
cfg := NewConfig()
if fetchResponse1.Version >= 4 {
cfg.Version = V0_11_0_0
offsetResponseVersion = 1
}
broker0 := NewMockBroker(t, 0)
fetchResponse2 := &FetchResponse{Version: fetchResponse1.Version}
fetchResponse2.AddError("my_topic", 0, ErrNoError)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetVersion(offsetResponseVersion).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 0),
"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
})
master, err := NewConsumer([]string{broker0.Addr()}, cfg)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 3)
if err != nil {
t.Fatal(err)
}
// Then: messages are returned in order even though their offsets are not
// sequential.
assertMessageOffset(t, <-consumer.Messages(), 5)
assertMessageOffset(t, <-consumer.Messages(), 7)
assertMessageOffset(t, <-consumer.Messages(), 11)
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
}
// If leadership for a partition changes, the consumer resolves the new
// leader and switches to it.
func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
// initial setup
seedBroker := NewMockBroker(t, 10)
leader0 := NewMockBroker(t, 0)
leader1 := NewMockBroker(t, 1)
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(leader0.Addr(), leader0.BrokerID()).
SetBroker(leader1.Addr(), leader1.BrokerID()).
SetLeader("my_topic", 0, leader0.BrokerID()).
SetLeader("my_topic", 1, leader1.BrokerID()),
})
mockOffsetResponse1 := NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 0).
SetOffset("my_topic", 0, OffsetNewest, 1000).
SetOffset("my_topic", 1, OffsetOldest, 0).
SetOffset("my_topic", 1, OffsetNewest, 1000)
leader0.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse1,
"FetchRequest": NewMockFetchResponse(t, 1),
})
leader1.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse1,
"FetchRequest": NewMockFetchResponse(t, 1),
})
// launch test goroutines
config := NewConfig()
config.Consumer.Retry.Backoff = 50
master, err := NewConsumer([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// we expect to end up (eventually) consuming exactly ten messages on each partition
var wg sync.WaitGroup
for i := int32(0); i < 2; i++ {
consumer, err := master.ConsumePartition("my_topic", i, 0)
if err != nil {
t.Error(err)
}
go func(c PartitionConsumer) {
for err := range c.Errors() {
t.Error(err)
}
}(consumer)
wg.Add(1)
go func(partition int32, c PartitionConsumer) {
for i := 0; i < 10; i++ {
message := <-consumer.Messages()
if message.Offset != int64(i) {
t.Error("Incorrect message offset!", i, partition, message.Offset)
}
if message.Partition != partition {
t.Error("Incorrect message partition!")
}
}
safeClose(t, consumer)
wg.Done()
}(i, consumer)
}
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 1")
// Stage 1:
// * my_topic/0 -> leader0 serves 4 messages
// * my_topic/1 -> leader1 serves 0 messages
mockFetchResponse := NewMockFetchResponse(t, 1)
for i := 0; i < 4; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg)
}
leader0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse,
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 2")
// Stage 2:
// * leader0 says that it is no longer serving my_topic/0
// * seedBroker tells that leader1 is serving my_topic/0 now
// seed broker tells that the new partition 0 leader is leader1
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetLeader("my_topic", 0, leader1.BrokerID()).
SetLeader("my_topic", 1, leader1.BrokerID()),
})
// leader0 says no longer leader of partition 0
fetchResponse := new(FetchResponse)
fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition)
leader0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": NewMockWrapper(fetchResponse),
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 3")
// Stage 3:
// * my_topic/0 -> leader1 serves 3 messages
// * my_topic/1 -> leader1 serves 8 messages
// leader1 provides 3 messages on partition 0, and 8 messages on partition 1
mockFetchResponse2 := NewMockFetchResponse(t, 2)
for i := 4; i < 7; i++ {
mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg)
}
for i := 0; i < 8; i++ {
mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg)
}
leader1.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse2,
})
time.Sleep(50 * time.Millisecond)
Logger.Printf(" STAGE 4")
// Stage 4:
// * my_topic/0 -> leader1 serves 3 messages
// * my_topic/1 -> leader1 tells that it is no longer the leader
// * seedBroker tells that leader0 is a new leader for my_topic/1
// metadata assigns 0 to leader1 and 1 to leader0
seedBroker.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetLeader("my_topic", 0, leader1.BrokerID()).
SetLeader("my_topic", 1, leader0.BrokerID()),
})
// leader1 provides three more messages on partition0, says no longer leader of partition1
mockFetchResponse3 := NewMockFetchResponse(t, 3).
SetMessage("my_topic", 0, int64(7), testMsg).
SetMessage("my_topic", 0, int64(8), testMsg).
SetMessage("my_topic", 0, int64(9), testMsg)
fetchResponse4 := new(FetchResponse)
fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition)
leader1.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4),
})
// leader0 provides two messages on partition 1
mockFetchResponse4 := NewMockFetchResponse(t, 2)
for i := 8; i < 10; i++ {
mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg)
}
leader0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse4,
})
wg.Wait()
safeClose(t, master)
leader1.Close()
leader0.Close()
seedBroker.Close()
}
// When two partitions have the same broker as the leader, a full channel
// buffer on one partition consumer does not affect the other consumer's
// ability to read messages.
func TestConsumerInterleavedClose(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).
SetLeader("my_topic", 1, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 1000).
SetOffset("my_topic", 0, OffsetNewest, 1100).
SetOffset("my_topic", 1, OffsetOldest, 2000).
SetOffset("my_topic", 1, OffsetNewest, 2100),
"FetchRequest": NewMockFetchResponse(t, 1).
SetMessage("my_topic", 0, 1000, testMsg).
SetMessage("my_topic", 0, 1001, testMsg).
SetMessage("my_topic", 0, 1002, testMsg).
SetMessage("my_topic", 1, 2000, testMsg),
})
config := NewConfig()
config.ChannelBufferSize = 0
master, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
c0, err := master.ConsumePartition("my_topic", 0, 1000)
if err != nil {
t.Fatal(err)
}
c1, err := master.ConsumePartition("my_topic", 1, 2000)
if err != nil {
t.Fatal(err)
}
// When/Then: we can read from partition 0 even if nobody reads from partition 1
assertMessageOffset(t, <-c0.Messages(), 1000)
assertMessageOffset(t, <-c0.Messages(), 1001)
assertMessageOffset(t, <-c0.Messages(), 1002)
safeClose(t, c1)
safeClose(t, c0)
safeClose(t, master)
broker0.Close()
}
func TestConsumerBounceWithReferenceOpen(t *testing.T) {
broker0 := NewMockBroker(t, 0)
broker0Addr := broker0.Addr()
broker1 := NewMockBroker(t, 1)
mockMetadataResponse := NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetBroker(broker1.Addr(), broker1.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()).
SetLeader("my_topic", 1, broker1.BrokerID())
mockOffsetResponse := NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetOldest, 1000).
SetOffset("my_topic", 0, OffsetNewest, 1100).
SetOffset("my_topic", 1, OffsetOldest, 2000).
SetOffset("my_topic", 1, OffsetNewest, 2100)
mockFetchResponse := NewMockFetchResponse(t, 1)
for i := 0; i < 10; i++ {
mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg)
mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg)
}
broker0.SetHandlerByMap(map[string]MockResponse{
"OffsetRequest": mockOffsetResponse,
"FetchRequest": mockFetchResponse,
})
broker1.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": mockMetadataResponse,
"OffsetRequest": mockOffsetResponse,
"FetchRequest": mockFetchResponse,
})
config := NewConfig()
config.Consumer.Return.Errors = true
config.Consumer.Retry.Backoff = 100 * time.Millisecond
config.ChannelBufferSize = 1
master, err := NewConsumer([]string{broker1.Addr()}, config)
if err != nil {
t.Fatal(err)
}
c0, err := master.ConsumePartition("my_topic", 0, 1000)
if err != nil {
t.Fatal(err)
}
c1, err := master.ConsumePartition("my_topic", 1, 2000)
if err != nil {
t.Fatal(err)
}
// read messages from both partitions to make sure that both brokers operate
// normally.
assertMessageOffset(t, <-c0.Messages(), 1000)
assertMessageOffset(t, <-c1.Messages(), 2000)
// Simulate broker shutdown. Note that the metadata response does not change,
// that is, leadership does not move to another broker, so the partition
// consumer will keep retrying to restore the connection with the broker.
broker0.Close()
// Make sure that while the partition/0 leader is down, consumer/partition/1
// is capable of pulling messages from broker1.
for i := 1; i < 7; i++ {
offset := (<-c1.Messages()).Offset
if offset != int64(2000+i) {
t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i))
}
}
// Bring broker0 back to service.
broker0 = NewMockBrokerAddr(t, 0, broker0Addr)
broker0.SetHandlerByMap(map[string]MockResponse{
"FetchRequest": mockFetchResponse,
})
// Read the rest of messages from both partitions.
for i := 7; i < 10; i++ {
assertMessageOffset(t, <-c1.Messages(), int64(2000+i))
}
for i := 1; i < 10; i++ {
assertMessageOffset(t, <-c0.Messages(), int64(1000+i))
}
select {
case <-c0.Errors():
default:
t.Errorf("Partition consumer should have detected broker restart")
}
safeClose(t, c1)
safeClose(t, c0)
safeClose(t, master)
broker0.Close()
broker1.Close()
}
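// TestConsumerOffsetOutOfRange verifies that ConsumePartition returns
// ErrOffsetOutOfRange for start offsets outside the range reported by the broker.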
func TestConsumerOffsetOutOfRange(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 2)
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 2345),
})
master, err := NewConsumer([]string{broker0.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
// When/Then
if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange {
t.Fatal("Should return ErrOffsetOutOfRange, got:", err)
}
safeClose(t, master)
broker0.Close()
}
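// TestConsumerExpiryTicker verifies that a partition consumer keeps delivering
// all messages when the caller drains the Messages() channel slowly but within
// MaxProcessingTime.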
func TestConsumerExpiryTicker(t *testing.T) {
// Given
broker0 := NewMockBroker(t, 0)
fetchResponse1 := &FetchResponse{}
for i := 1; i <= 8; i++ {
fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, int64(i))
}
broker0.SetHandlerByMap(map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetBroker(broker0.Addr(), broker0.BrokerID()).
SetLeader("my_topic", 0, broker0.BrokerID()),
"OffsetRequest": NewMockOffsetResponse(t).
SetOffset("my_topic", 0, OffsetNewest, 1234).
SetOffset("my_topic", 0, OffsetOldest, 1),
"FetchRequest": NewMockSequence(fetchResponse1),
})
config := NewConfig()
config.ChannelBufferSize = 0
config.Consumer.MaxProcessingTime = 10 * time.Millisecond
master, err := NewConsumer([]string{broker0.Addr()}, config)
if err != nil {
t.Fatal(err)
}
// When
consumer, err := master.ConsumePartition("my_topic", 0, 1)
if err != nil {
t.Fatal(err)
}
// Then: messages with offsets 1 through 8 are read
for i := 1; i <= 8; i++ {
assertMessageOffset(t, <-consumer.Messages(), int64(i))
time.Sleep(2 * time.Millisecond)
}
safeClose(t, consumer)
safeClose(t, master)
broker0.Close()
}
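// assertMessageOffset fails the test if the consumed message does not carry
// the expected offset.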
func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) {
if msg.Offset != expectedOffset {
t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset)
}
}
// This example shows how to use the consumer to read messages
// from a single partition.
func ExampleConsumer() {
consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
if err != nil {
panic(err)
}
defer func() {
if err := consumer.Close(); err != nil {
log.Fatalln(err)
}
}()
partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest)
if err != nil {
panic(err)
}
defer func() {
if err := partitionConsumer.Close(); err != nil {
log.Fatalln(err)
}
}()
// Trap SIGINT to trigger a shutdown.
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
consumed := 0
ConsumerLoop:
for {
select {
case msg := <-partitionConsumer.Messages():
log.Printf("Consumed message offset %d\n", msg.Offset)
consumed++
case <-signals:
break ConsumerLoop
}
}
log.Printf("Consumed: %d\n", consumed)
}

View file

@@ -1,50 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
createPartitionRequestNoAssignment = []byte{
0, 0, 0, 1, // one topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
255, 255, 255, 255, // no assignments
0, 0, 0, 100, // timeout
0, // validate only = false
}
createPartitionRequestAssignment = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 3, // 3 partitions
0, 0, 0, 2,
0, 0, 0, 2,
0, 0, 0, 2, 0, 0, 0, 3,
0, 0, 0, 2,
0, 0, 0, 3, 0, 0, 0, 1,
0, 0, 0, 100,
1, // validate only = true
}
)
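// TestCreatePartitionsRequest checks that CreatePartitionsRequest encodes and
// decodes correctly, both without and with explicit replica assignments.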
func TestCreatePartitionsRequest(t *testing.T) {
req := &CreatePartitionsRequest{
TopicPartitions: map[string]*TopicPartition{
"topic": &TopicPartition{
Count: 3,
},
},
Timeout: 100 * time.Millisecond,
}
buf := testRequestEncode(t, "no assignment", req, createPartitionRequestNoAssignment)
testRequestDecode(t, "no assignment", req, buf)
req.ValidateOnly = true
req.TopicPartitions["topic"].Assignment = [][]int32{{2, 3}, {3, 1}}
buf = testRequestEncode(t, "assignment", req, createPartitionRequestAssignment)
testRequestDecode(t, "assignment", req, buf)
}

View file

@@ -1,52 +0,0 @@
package sarama
import (
"reflect"
"testing"
"time"
)
var (
createPartitionResponseSuccess = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, // no error
255, 255, // no error message
}
createPartitionResponseFail = []byte{
0, 0, 0, 100, // throttleTimeMs
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 37, // partition error
0, 5, 'e', 'r', 'r', 'o', 'r',
}
)
func TestCreatePartitionsResponse(t *testing.T) {
resp := &CreatePartitionsResponse{
ThrottleTime: 100 * time.Millisecond,
TopicPartitionErrors: map[string]*TopicPartitionError{
"topic": &TopicPartitionError{},
},
}
testResponse(t, "success", resp, createPartitionResponseSuccess)
decodedresp := new(CreatePartitionsResponse)
testVersionDecodable(t, "success", decodedresp, createPartitionResponseSuccess, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
errMsg := "error"
resp.TopicPartitionErrors["topic"].Err = ErrInvalidPartitions
resp.TopicPartitionErrors["topic"].ErrMsg = &errMsg
testResponse(t, "with errors", resp, createPartitionResponseFail)
decodedresp = new(CreatePartitionsResponse)
testVersionDecodable(t, "with errors", decodedresp, createPartitionResponseFail, 0)
if !reflect.DeepEqual(decodedresp, resp) {
t.Errorf("Decoding error: expected %v but got %v", decodedresp, resp)
}
}

View file

@@ -1,50 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
createTopicsRequestV0 = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
255, 255, 255, 255,
255, 255,
0, 0, 0, 1, // 1 replica assignment
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2,
0, 0, 0, 1, // 1 config
0, 12, 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
0, 2, '-', '1',
0, 0, 0, 100,
}
createTopicsRequestV1 = append(createTopicsRequestV0, byte(1))
)
func TestCreateTopicsRequest(t *testing.T) {
retention := "-1"
req := &CreateTopicsRequest{
TopicDetails: map[string]*TopicDetail{
"topic": {
NumPartitions: -1,
ReplicationFactor: -1,
ReplicaAssignment: map[int32][]int32{
0: []int32{0, 1, 2},
},
ConfigEntries: map[string]*string{
"retention.ms": &retention,
},
},
},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "version 0", req, createTopicsRequestV0)
req.Version = 1
req.ValidateOnly = true
testRequest(t, "version 1", req, createTopicsRequestV1)
}

View file

@@ -1,52 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
createTopicsResponseV0 = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42,
}
createTopicsResponseV1 = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42,
0, 3, 'm', 's', 'g',
}
createTopicsResponseV2 = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42,
0, 3, 'm', 's', 'g',
}
)
func TestCreateTopicsResponse(t *testing.T) {
resp := &CreateTopicsResponse{
TopicErrors: map[string]*TopicError{
"topic": &TopicError{
Err: ErrInvalidRequest,
},
},
}
testResponse(t, "version 0", resp, createTopicsResponseV0)
resp.Version = 1
msg := "msg"
resp.TopicErrors["topic"].ErrMsg = &msg
testResponse(t, "version 1", resp, createTopicsResponseV1)
resp.Version = 2
resp.ThrottleTime = 100 * time.Millisecond
testResponse(t, "version 2", resp, createTopicsResponseV2)
}

View file

@@ -1,34 +0,0 @@
package sarama
import "testing"
var (
emptyDeleteGroupsRequest = []byte{0, 0, 0, 0}
singleDeleteGroupsRequest = []byte{
0, 0, 0, 1, // 1 group
0, 3, 'f', 'o', 'o', // group name: foo
}
doubleDeleteGroupsRequest = []byte{
0, 0, 0, 2, // 2 groups
0, 3, 'f', 'o', 'o', // group name: foo
0, 3, 'b', 'a', 'r', // group name: bar
}
)
func TestDeleteGroupsRequest(t *testing.T) {
var request *DeleteGroupsRequest
request = new(DeleteGroupsRequest)
testRequest(t, "no groups", request, emptyDeleteGroupsRequest)
request = new(DeleteGroupsRequest)
request.AddGroup("foo")
testRequest(t, "one group", request, singleDeleteGroupsRequest)
request = new(DeleteGroupsRequest)
request.AddGroup("foo")
request.AddGroup("bar")
testRequest(t, "two groups", request, doubleDeleteGroupsRequest)
}

View file

@@ -1,57 +0,0 @@
package sarama
import (
"testing"
)
var (
emptyDeleteGroupsResponse = []byte{
0, 0, 0, 0, // does not violate any quota
0, 0, 0, 0, // no groups
}
errorDeleteGroupsResponse = []byte{
0, 0, 0, 0, // does not violate any quota
0, 0, 0, 1, // 1 group
0, 3, 'f', 'o', 'o', // group name
0, 31, // error ErrClusterAuthorizationFailed
}
noErrorDeleteGroupsResponse = []byte{
0, 0, 0, 0, // does not violate any quota
0, 0, 0, 1, // 1 group
0, 3, 'f', 'o', 'o', // group name
0, 0, // no error
}
)
func TestDeleteGroupsResponse(t *testing.T) {
var response *DeleteGroupsResponse
response = new(DeleteGroupsResponse)
testVersionDecodable(t, "empty", response, emptyDeleteGroupsResponse, 0)
if response.ThrottleTime != 0 {
t.Error("Expected no violation")
}
if len(response.GroupErrorCodes) != 0 {
t.Error("Expected no groups")
}
response = new(DeleteGroupsResponse)
testVersionDecodable(t, "error", response, errorDeleteGroupsResponse, 0)
if response.ThrottleTime != 0 {
t.Error("Expected no violation")
}
if response.GroupErrorCodes["foo"] != ErrClusterAuthorizationFailed {
t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"])
}
response = new(DeleteGroupsResponse)
testVersionDecodable(t, "no error", response, noErrorDeleteGroupsResponse, 0)
if response.ThrottleTime != 0 {
t.Error("Expected no violation")
}
if response.GroupErrorCodes["foo"] != ErrNoError {
t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"])
}
}

View file

@@ -1,36 +0,0 @@
package sarama
import (
"testing"
"time"
)
var deleteRecordsRequest = []byte{
0, 0, 0, 2,
0, 5, 'o', 't', 'h', 'e', 'r',
0, 0, 0, 0,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 2,
0, 0, 0, 19,
0, 0, 0, 0, 0, 0, 0, 200,
0, 0, 0, 20,
0, 0, 0, 0, 0, 0, 0, 190,
0, 0, 0, 100,
}
func TestDeleteRecordsRequest(t *testing.T) {
req := &DeleteRecordsRequest{
Topics: map[string]*DeleteRecordsRequestTopic{
"topic": {
PartitionOffsets: map[int32]int64{
19: 200,
20: 190,
},
},
"other": {},
},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "", req, deleteRecordsRequest)
}

View file

@@ -1,39 +0,0 @@
package sarama
import (
"testing"
"time"
)
var deleteRecordsResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 2,
0, 5, 'o', 't', 'h', 'e', 'r',
0, 0, 0, 0,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 2,
0, 0, 0, 19,
0, 0, 0, 0, 0, 0, 0, 200,
0, 0,
0, 0, 0, 20,
255, 255, 255, 255, 255, 255, 255, 255,
0, 3,
}
func TestDeleteRecordsResponse(t *testing.T) {
resp := &DeleteRecordsResponse{
Version: 0,
ThrottleTime: 100 * time.Millisecond,
Topics: map[string]*DeleteRecordsResponseTopic{
"topic": {
Partitions: map[int32]*DeleteRecordsResponsePartition{
19: {LowWatermark: 200, Err: 0},
20: {LowWatermark: -1, Err: 3},
},
},
"other": {},
},
}
testResponse(t, "", resp, deleteRecordsResponse)
}

View file

@@ -1,33 +0,0 @@
package sarama
import (
"testing"
"time"
)
var deleteTopicsRequest = []byte{
0, 0, 0, 2,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 5, 'o', 't', 'h', 'e', 'r',
0, 0, 0, 100,
}
func TestDeleteTopicsRequestV0(t *testing.T) {
req := &DeleteTopicsRequest{
Version: 0,
Topics: []string{"topic", "other"},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "", req, deleteTopicsRequest)
}
func TestDeleteTopicsRequestV1(t *testing.T) {
req := &DeleteTopicsRequest{
Version: 1,
Topics: []string{"topic", "other"},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "", req, deleteTopicsRequest)
}

View file

@@ -1,36 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
deleteTopicsResponseV0 = []byte{
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0,
}
deleteTopicsResponseV1 = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0,
}
)
func TestDeleteTopicsResponse(t *testing.T) {
resp := &DeleteTopicsResponse{
TopicErrorCodes: map[string]KError{
"topic": ErrNoError,
},
}
testResponse(t, "version 0", resp, deleteTopicsResponseV0)
resp.Version = 1
resp.ThrottleTime = 100 * time.Millisecond
testResponse(t, "version 1", resp, deleteTopicsResponseV1)
}

View file

@@ -1,90 +0,0 @@
package sarama
import "testing"
var (
emptyDescribeConfigsRequest = []byte{
0, 0, 0, 0, // 0 configs
}
singleDescribeConfigsRequest = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, //1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
}
doubleDescribeConfigsRequest = []byte{
0, 0, 0, 2, // 2 configs
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 2, //2 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 12, // 12 chars
'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
2, // a topic
0, 3, 'b', 'a', 'r', // topic name: bar
0, 0, 0, 1, // 1 config
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
}
singleDescribeConfigsRequestAllConfigs = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
255, 255, 255, 255, // null ConfigNames (return all configs)
}
)
func TestDescribeConfigsRequest(t *testing.T) {
var request *DescribeConfigsRequest
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{},
}
testRequest(t, "no requests", request, emptyDescribeConfigsRequest)
configs := []string{"segment.ms"}
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
ConfigNames: configs,
},
},
}
testRequest(t, "one config", request, singleDescribeConfigsRequest)
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
ConfigNames: []string{"segment.ms", "retention.ms"},
},
&ConfigResource{
Type: TopicResource,
Name: "bar",
ConfigNames: []string{"segment.ms"},
},
},
}
testRequest(t, "two configs", request, doubleDescribeConfigsRequest)
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
},
},
}
testRequest(t, "one topic, all configs", request, singleDescribeConfigsRequestAllConfigs)
}

View file

@@ -1,60 +0,0 @@
package sarama
import (
"testing"
)
var (
describeConfigsResponseEmpty = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 0, // no configs
}
describeConfigsResponsePopulated = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 1, // response
0, 0, //errorcode
0, 0, //string
2, // topic
0, 3, 'f', 'o', 'o',
0, 0, 0, 1, //configs
0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4, '1', '0', '0', '0',
0, // ReadOnly
0, // Default
0, // Sensitive
}
)
func TestDescribeConfigsResponse(t *testing.T) {
var response *DescribeConfigsResponse
response = &DescribeConfigsResponse{
Resources: []*ResourceResponse{},
}
testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0)
if len(response.Resources) != 0 {
t.Error("Expected no groups")
}
response = &DescribeConfigsResponse{
Resources: []*ResourceResponse{
&ResourceResponse{
ErrorCode: 0,
ErrorMsg: "",
Type: TopicResource,
Name: "foo",
Configs: []*ConfigEntry{
&ConfigEntry{
Name: "segment.ms",
Value: "1000",
ReadOnly: false,
Default: false,
Sensitive: false,
},
},
},
},
}
testResponse(t, "response with error", response, describeConfigsResponsePopulated)
}

View file

@@ -1,34 +0,0 @@
package sarama
import "testing"
var (
emptyDescribeGroupsRequest = []byte{0, 0, 0, 0}
singleDescribeGroupsRequest = []byte{
0, 0, 0, 1, // 1 group
0, 3, 'f', 'o', 'o', // group name: foo
}
doubleDescribeGroupsRequest = []byte{
0, 0, 0, 2, // 2 groups
0, 3, 'f', 'o', 'o', // group name: foo
0, 3, 'b', 'a', 'r', // group name: bar
}
)
func TestDescribeGroupsRequest(t *testing.T) {
var request *DescribeGroupsRequest
request = new(DescribeGroupsRequest)
testRequest(t, "no groups", request, emptyDescribeGroupsRequest)
request = new(DescribeGroupsRequest)
request.AddGroup("foo")
testRequest(t, "one group", request, singleDescribeGroupsRequest)
request = new(DescribeGroupsRequest)
request.AddGroup("foo")
request.AddGroup("bar")
testRequest(t, "two groups", request, doubleDescribeGroupsRequest)
}

View file

@@ -1,91 +0,0 @@
package sarama
import (
"reflect"
"testing"
)
var (
describeGroupsResponseEmpty = []byte{
0, 0, 0, 0, // no groups
}
describeGroupsResponsePopulated = []byte{
0, 0, 0, 2, // 2 groups
0, 0, // no error
0, 3, 'f', 'o', 'o', // Group ID
0, 3, 'b', 'a', 'r', // State
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // ConsumerProtocol type
0, 3, 'b', 'a', 'z', // Protocol name
0, 0, 0, 1, // 1 member
0, 2, 'i', 'd', // Member ID
0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID
0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host
0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata
0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment
0, 30, // ErrGroupAuthorizationFailed
0, 0,
0, 0,
0, 0,
0, 0,
0, 0, 0, 0,
}
)
func TestDescribeGroupsResponse(t *testing.T) {
var response *DescribeGroupsResponse
response = new(DescribeGroupsResponse)
testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0)
if len(response.Groups) != 0 {
t.Error("Expected no groups")
}
response = new(DescribeGroupsResponse)
testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0)
if len(response.Groups) != 2 {
t.Error("Expected two groups")
}
group0 := response.Groups[0]
if group0.Err != ErrNoError {
t.Error("Unexpected groups[0].Err, found", group0.Err)
}
if group0.GroupId != "foo" {
t.Error("Unexpected groups[0].GroupId, found", group0.GroupId)
}
if group0.State != "bar" {
t.Error("Unexpected groups[0].State, found", group0.State)
}
if group0.ProtocolType != "consumer" {
t.Error("Unexpected groups[0].ProtocolType, found", group0.ProtocolType)
}
if group0.Protocol != "baz" {
t.Error("Unexpected groups[0].Protocol, found", group0.Protocol)
}
if len(group0.Members) != 1 {
t.Error("Unexpected groups[0].Members, found", group0.Members)
}
if group0.Members["id"].ClientId != "sarama" {
t.Error("Unexpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId)
}
if group0.Members["id"].ClientHost != "localhost" {
t.Error("Unexpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost)
}
if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) {
t.Error("Unexpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata)
}
if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) {
t.Error("Unexpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment)
}
group1 := response.Groups[1]
if group1.Err != ErrGroupAuthorizationFailed {
t.Error("Unexpected groups[1].Err, found", group1.Err)
}
if len(group1.Members) != 0 {
t.Error("Unexpected groups[1].Members, found", group1.Members)
}
}

View file

@@ -1,23 +0,0 @@
package sarama
import "testing"
var (
endTxnRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 0, 0, 0, 31, 64,
0, 1,
1,
}
)
func TestEndTxnRequest(t *testing.T) {
req := &EndTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 1,
TransactionResult: true,
}
testRequest(t, "", req, endTxnRequest)
}

View file

@@ -1,22 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
endTxnResponse = []byte{
0, 0, 0, 100,
0, 49,
}
)
func TestEndTxnResponse(t *testing.T) {
resp := &EndTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrInvalidProducerIDMapping,
}
testResponse(t, "", resp, endTxnResponse)
}

View file

@@ -1,48 +0,0 @@
package sarama
import "testing"
var (
fetchRequestNoBlocks = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
fetchRequestWithProperties = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF,
0x00, 0x00, 0x00, 0x00}
fetchRequestOneBlock = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
fetchRequestOneBlockV4 = []byte{
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF,
0x01,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56}
)
func TestFetchRequest(t *testing.T) {
request := new(FetchRequest)
testRequest(t, "no blocks", request, fetchRequestNoBlocks)
request.MaxWaitTime = 0x20
request.MinBytes = 0xEF
testRequest(t, "with properties", request, fetchRequestWithProperties)
request.MaxWaitTime = 0
request.MinBytes = 0
request.AddBlock("topic", 0x12, 0x34, 0x56)
testRequest(t, "one block", request, fetchRequestOneBlock)
request.Version = 4
request.MaxBytes = 0xFF
request.Isolation = ReadCommitted
testRequest(t, "one block v4", request, fetchRequestOneBlockV4)
}

View file

@@ -1,248 +0,0 @@
package sarama
import (
"bytes"
"testing"
)
var (
emptyFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00}
oneMessageFetchResponse = []byte{
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x05,
0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10,
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
oneRecordFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00, // ThrottleTime
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0x05, // Partition
0x00, 0x01, // Error
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
0x00, 0x00, 0x00, 0x52, // Records length
// recordBatch
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x46,
0x00, 0x00, 0x00, 0x00,
0x02,
0xDB, 0x47, 0x14, 0xC9,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
// record
0x28,
0x00,
0x0A,
0x00,
0x08, 0x01, 0x02, 0x03, 0x04,
0x06, 0x05, 0x06, 0x07,
0x02,
0x06, 0x08, 0x09, 0x0A,
0x04, 0x0B, 0x0C}
oneMessageFetchResponseV4 = []byte{
0x00, 0x00, 0x00, 0x00, // ThrottleTime
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0x05, // Partition
0x00, 0x01, // Error
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // High Watermark Offset
0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, // Last Stable Offset
0x00, 0x00, 0x00, 0x00, // Number of Aborted Transactions
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
)
func TestEmptyFetchResponse(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0)
if len(response.Blocks) != 0 {
t.Error("Decoding produced topic blocks where there were none.")
}
}
func TestOneMessageFetchResponse(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing message where there wasn't one.")
}
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of messages.")
}
msgBlock := block.RecordsSet[0].MsgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
msg := msgBlock.Msg
if msg.Codec != CompressionNone {
t.Error("Decoding produced incorrect message compression.")
}
if msg.Key != nil {
t.Error("Decoding produced message key where there was none.")
}
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
t.Error("Decoding produced incorrect message value.")
}
}
func TestOneRecordFetchResponse(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one record", &response, oneRecordFetchResponse, 4)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
rec := block.RecordsSet[0].RecordBatch.Records[0]
if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) {
t.Error("Decoding produced incorrect record key.")
}
if !bytes.Equal(rec.Value, []byte{0x05, 0x06, 0x07}) {
t.Error("Decoding produced incorrect record value.")
}
}
func TestOneMessageFetchResponseV4(t *testing.T) {
response := FetchResponse{}
testVersionDecodable(t, "one message v4", &response, oneMessageFetchResponseV4, 4)
if len(response.Blocks) != 1 {
t.Fatal("Decoding produced incorrect number of topic blocks.")
}
if len(response.Blocks["topic"]) != 1 {
t.Fatal("Decoding produced incorrect number of partition blocks for topic.")
}
block := response.GetBlock("topic", 5)
if block == nil {
t.Fatal("GetBlock didn't return block.")
}
if block.Err != ErrOffsetOutOfRange {
t.Error("Decoding didn't produce correct error code.")
}
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if partial {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
msgBlock := block.RecordsSet[0].MsgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
msg := msgBlock.Msg
if msg.Codec != CompressionNone {
t.Error("Decoding produced incorrect message compression.")
}
if msg.Key != nil {
t.Error("Decoding produced message key where there was none.")
}
if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) {
t.Error("Decoding produced incorrect message value.")
}
}

View file

@@ -1,33 +0,0 @@
package sarama
import "testing"
var (
findCoordinatorRequestConsumerGroup = []byte{
0, 5, 'g', 'r', 'o', 'u', 'p',
0,
}
findCoordinatorRequestTransaction = []byte{
0, 13, 't', 'r', 'a', 'n', 's', 'a', 'c', 't', 'i', 'o', 'n', 'i', 'd',
1,
}
)
func TestFindCoordinatorRequest(t *testing.T) {
req := &FindCoordinatorRequest{
Version: 1,
CoordinatorKey: "group",
CoordinatorType: CoordinatorGroup,
}
testRequest(t, "version 1 - group", req, findCoordinatorRequestConsumerGroup)
req = &FindCoordinatorRequest{
Version: 1,
CoordinatorKey: "transactionid",
CoordinatorType: CoordinatorTransaction,
}
testRequest(t, "version 1 - transaction", req, findCoordinatorRequestTransaction)
}

View file

@@ -1,83 +0,0 @@
package sarama
import (
"testing"
"time"
)
func TestFindCoordinatorResponse(t *testing.T) {
errMsg := "kaboom"
for _, tc := range []struct {
desc string
response *FindCoordinatorResponse
encoded []byte
}{{
desc: "version 0 - no error",
response: &FindCoordinatorResponse{
Version: 0,
Err: ErrNoError,
Coordinator: &Broker{
id: 7,
addr: "host:9092",
},
},
encoded: []byte{
0, 0, // Err
0, 0, 0, 7, // Coordinator.ID
0, 4, 'h', 'o', 's', 't', // Coordinator.Host
0, 0, 35, 132, // Coordinator.Port
},
}, {
desc: "version 1 - no error",
response: &FindCoordinatorResponse{
Version: 1,
ThrottleTime: 100 * time.Millisecond,
Err: ErrNoError,
Coordinator: &Broker{
id: 7,
addr: "host:9092",
},
},
encoded: []byte{
0, 0, 0, 100, // ThrottleTime
0, 0, // Err
255, 255, // ErrMsg: empty
0, 0, 0, 7, // Coordinator.ID
0, 4, 'h', 'o', 's', 't', // Coordinator.Host
0, 0, 35, 132, // Coordinator.Port
},
}, {
desc: "version 0 - error",
response: &FindCoordinatorResponse{
Version: 0,
Err: ErrConsumerCoordinatorNotAvailable,
Coordinator: NoNode,
},
encoded: []byte{
0, 15, // Err
255, 255, 255, 255, // Coordinator.ID: -1
0, 0, // Coordinator.Host: ""
255, 255, 255, 255, // Coordinator.Port: -1
},
}, {
desc: "version 1 - error",
response: &FindCoordinatorResponse{
Version: 1,
ThrottleTime: 100 * time.Millisecond,
Err: ErrConsumerCoordinatorNotAvailable,
ErrMsg: &errMsg,
Coordinator: NoNode,
},
encoded: []byte{
0, 0, 0, 100, // ThrottleTime
0, 15, // Err
0, 6, 'k', 'a', 'b', 'o', 'o', 'm', // ErrMsg
255, 255, 255, 255, // Coordinator.ID: -1
0, 0, // Coordinator.Host: ""
255, 255, 255, 255, // Coordinator.Port: -1
},
}} {
testResponse(t, tc.desc, tc.response, tc.encoded)
}
}

View file

@@ -1,90 +0,0 @@
package sarama
import (
"fmt"
"testing"
"time"
)
func TestFuncConnectionFailure(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
Proxies["kafka1"].Enabled = false
SaveProxy(t, "kafka1")
config := NewConfig()
config.Metadata.Retry.Max = 1
_, err := NewClient([]string{kafkaBrokers[0]}, config)
if err != ErrOutOfBrokers {
t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err)
}
}
func TestFuncClientMetadata(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Metadata.Retry.Backoff = 10 * time.Millisecond
client, err := NewClient(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, got", err)
}
partitions, err := client.Partitions("test.4")
if err != nil {
t.Error(err)
}
if len(partitions) != 4 {
t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions)
}
partitions, err = client.Partitions("test.1")
if err != nil {
t.Error(err)
}
if len(partitions) != 1 {
t.Errorf("Expected test.1 topic to have 1 partitions, found %v", partitions)
}
safeClose(t, client)
}
func TestFuncClientCoordinator(t *testing.T) {
checkKafkaVersion(t, "0.8.2")
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
client, err := NewClient(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i))
if err != nil {
t.Fatal(err)
}
if connected, err := broker.Connected(); !connected || err != nil {
t.Errorf("Expected to coordinator %s broker to be properly connected.", broker.Addr())
}
}
safeClose(t, client)
}

View file

@@ -1,226 +0,0 @@
package sarama
import (
"fmt"
"math"
"os"
"sort"
"sync"
"testing"
"time"
)
func TestFuncConsumerOffsetOutOfRange(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
consumer, err := NewConsumer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange {
t.Error("Expected ErrOffsetOutOfRange, got:", err)
}
if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange {
t.Error("Expected ErrOffsetOutOfRange, got:", err)
}
safeClose(t, consumer)
}
func TestConsumerHighWaterMarkOffset(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
p, err := NewSyncProducer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, p)
_, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")})
if err != nil {
t.Fatal(err)
}
c, err := NewConsumer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, c)
pc, err := c.ConsumePartition("test.1", 0, offset)
if err != nil {
t.Fatal(err)
}
<-pc.Messages()
if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 {
t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo)
}
safeClose(t, pc)
}
// Makes sure that messages produced by all supported combinations of client
// versions and compression codecs (except LZ4) can be consumed by all
// supported consumer versions. It relies on the KAFKA_VERSION environment
// variable to provide the version of the test Kafka cluster.
//
// Note that LZ4 codec was introduced in v0.10.0.0 and therefore is excluded
// from this test case. It has a similar version matrix test case below that
// only checks versions from v0.10.0.0 until KAFKA_VERSION.
func TestVersionMatrix(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
// Produce lots of messages with all possible combinations of supported
// protocol versions and compression codecs, except LZ4.
testVersions := versionRange(V0_8_2_0)
allCodecsButLZ4 := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy}
producedMessages := produceMsgs(t, testVersions, allCodecsButLZ4, 17, 100)
// When/Then
consumeMsgs(t, testVersions, producedMessages)
}
// Support for LZ4 codec was introduced in v0.10.0.0 so a version matrix to
// test LZ4 should start with v0.10.0.0.
func TestVersionMatrixLZ4(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
// Produce lots of messages with all possible combinations of supported
// protocol versions starting with v0.10 (the first with LZ4 support)
// and all possible compression codecs.
testVersions := versionRange(V0_10_0_0)
allCodecs := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy, CompressionLZ4}
producedMessages := produceMsgs(t, testVersions, allCodecs, 17, 100)
// When/Then
consumeMsgs(t, testVersions, producedMessages)
}
func prodMsg2Str(prodMsg *ProducerMessage) string {
return fmt.Sprintf("{offset: %d, value: %s}", prodMsg.Offset, string(prodMsg.Value.(StringEncoder)))
}
func consMsg2Str(consMsg *ConsumerMessage) string {
return fmt.Sprintf("{offset: %d, value: %s}", consMsg.Offset, string(consMsg.Value))
}
func versionRange(lower KafkaVersion) []KafkaVersion {
// Get the test cluster version from the environment. If there is nothing
// there then assume the highest.
upper, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION"))
if err != nil {
upper = MaxVersion
}
versions := make([]KafkaVersion, 0, len(SupportedVersions))
for _, v := range SupportedVersions {
if !v.IsAtLeast(lower) {
continue
}
if !upper.IsAtLeast(v) {
return versions
}
versions = append(versions, v)
}
return versions
}
func produceMsgs(t *testing.T, clientVersions []KafkaVersion, codecs []CompressionCodec, flush int, countPerVerCodec int) []*ProducerMessage {
var wg sync.WaitGroup
var producedMessagesMu sync.Mutex
var producedMessages []*ProducerMessage
for _, prodVer := range clientVersions {
for _, codec := range codecs {
prodCfg := NewConfig()
prodCfg.Version = prodVer
prodCfg.Producer.Return.Successes = true
prodCfg.Producer.Return.Errors = true
prodCfg.Producer.Flush.MaxMessages = flush
prodCfg.Producer.Compression = codec
p, err := NewSyncProducer(kafkaBrokers, prodCfg)
if err != nil {
t.Errorf("Failed to create producer: version=%s, compression=%s, err=%v", prodVer, codec, err)
continue
}
defer safeClose(t, p)
for i := 0; i < countPerVerCodec; i++ {
msg := &ProducerMessage{
Topic: "test.1",
Value: StringEncoder(fmt.Sprintf("msg:%s:%s:%d", prodVer, codec, i)),
}
wg.Add(1)
go func() {
defer wg.Done()
_, _, err := p.SendMessage(msg)
if err != nil {
t.Errorf("Failed to produce message: %s, err=%v", msg.Value, err)
}
producedMessagesMu.Lock()
producedMessages = append(producedMessages, msg)
producedMessagesMu.Unlock()
}()
}
}
}
wg.Wait()
// Sort produced messages in ascending offset order.
sort.Slice(producedMessages, func(i, j int) bool {
return producedMessages[i].Offset < producedMessages[j].Offset
})
t.Logf("*** Total produced %d, firstOffset=%d, lastOffset=%d\n",
len(producedMessages), producedMessages[0].Offset, producedMessages[len(producedMessages)-1].Offset)
return producedMessages
}
func consumeMsgs(t *testing.T, clientVersions []KafkaVersion, producedMessages []*ProducerMessage) {
// Consume all produced messages with all client versions supported by the
// cluster.
consumerVersionLoop:
for _, consVer := range clientVersions {
t.Logf("*** Consuming with client version %s\n", consVer)
// Create a partition consumer that should start from the first produced
// message.
consCfg := NewConfig()
consCfg.Version = consVer
c, err := NewConsumer(kafkaBrokers, consCfg)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, c)
pc, err := c.ConsumePartition("test.1", 0, producedMessages[0].Offset)
if err != nil {
t.Fatal(err)
}
defer safeClose(t, pc)
// Consume as many messages as were produced and make sure that
// order is preserved.
for i, prodMsg := range producedMessages {
select {
case consMsg := <-pc.Messages():
if consMsg.Offset != prodMsg.Offset {
t.Errorf("Consumed unexpected offset: version=%s, index=%d, want=%s, got=%s",
consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg))
continue consumerVersionLoop
}
if string(consMsg.Value) != string(prodMsg.Value.(StringEncoder)) {
t.Errorf("Consumed unexpected msg: version=%s, index=%d, want=%s, got=%s",
consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg))
continue consumerVersionLoop
}
case <-time.After(3 * time.Second):
t.Fatalf("Timeout waiting for: index=%d, offset=%d, msg=%s", i, prodMsg.Offset, prodMsg.Value)
}
}
}
}

View file

@@ -1,47 +0,0 @@
package sarama
import (
"testing"
)
func TestFuncOffsetManager(t *testing.T) {
checkKafkaVersion(t, "0.8.2")
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
client, err := NewClient(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client)
if err != nil {
t.Fatal(err)
}
pom1, err := offsetManager.ManagePartition("test.1", 0)
if err != nil {
t.Fatal(err)
}
pom1.MarkOffset(10, "test metadata")
safeClose(t, pom1)
pom2, err := offsetManager.ManagePartition("test.1", 0)
if err != nil {
t.Fatal(err)
}
offset, metadata := pom2.NextOffset()
if offset != 10 {
t.Errorf("Expected the next offset to be 10, found %d.", offset)
}
if metadata != "test metadata" {
t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata)
}
safeClose(t, pom2)
safeClose(t, offsetManager)
safeClose(t, client)
}

View file

@@ -1,323 +0,0 @@
package sarama
import (
"fmt"
"os"
"sync"
"testing"
"time"
toxiproxy "github.com/Shopify/toxiproxy/client"
"github.com/rcrowley/go-metrics"
)
const TestBatchSize = 1000
func TestFuncProducing(t *testing.T) {
config := NewConfig()
testProducingMessages(t, config)
}
func TestFuncProducingGzip(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionGZIP
testProducingMessages(t, config)
}
func TestFuncProducingSnappy(t *testing.T) {
config := NewConfig()
config.Producer.Compression = CompressionSnappy
testProducingMessages(t, config)
}
func TestFuncProducingNoResponse(t *testing.T) {
config := NewConfig()
config.Producer.RequiredAcks = NoResponse
testProducingMessages(t, config)
}
func TestFuncProducingFlushing(t *testing.T) {
config := NewConfig()
config.Producer.Flush.Messages = TestBatchSize / 8
config.Producer.Flush.Frequency = 250 * time.Millisecond
testProducingMessages(t, config)
}
func TestFuncMultiPartitionProduce(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
config := NewConfig()
config.ChannelBufferSize = 20
config.Producer.Flush.Frequency = 50 * time.Millisecond
config.Producer.Flush.Messages = 200
config.Producer.Return.Successes = true
producer, err := NewSyncProducer(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
wg.Add(TestBatchSize)
for i := 1; i <= TestBatchSize; i++ {
go func(i int) {
defer wg.Done()
msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))}
if _, _, err := producer.SendMessage(msg); err != nil {
t.Error(i, err)
}
}(i)
}
wg.Wait()
if err := producer.Close(); err != nil {
t.Error(err)
}
}
func TestFuncProducingToInvalidTopic(t *testing.T) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
producer, err := NewSyncProducer(kafkaBrokers, nil)
if err != nil {
t.Fatal(err)
}
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition {
t.Error("Expected ErrUnknownTopicOrPartition, found", err)
}
safeClose(t, producer)
}
func testProducingMessages(t *testing.T, config *Config) {
setupFunctionalTest(t)
defer teardownFunctionalTest(t)
// Configure some latency in order to properly validate the request latency metric
for _, proxy := range Proxies {
if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil {
t.Fatal("Unable to configure latency toxicity", err)
}
}
config.Producer.Return.Successes = true
config.Consumer.Return.Errors = true
client, err := NewClient(kafkaBrokers, config)
if err != nil {
t.Fatal(err)
}
// Keep in mind the current offset
initialOffset, err := client.GetOffset("test.1", 0, OffsetNewest)
if err != nil {
t.Fatal(err)
}
producer, err := NewAsyncProducerFromClient(client)
if err != nil {
t.Fatal(err)
}
expectedResponses := TestBatchSize
for i := 1; i <= TestBatchSize; {
msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))}
select {
case producer.Input() <- msg:
i++
case ret := <-producer.Errors():
t.Fatal(ret.Err)
case <-producer.Successes():
expectedResponses--
}
}
for expectedResponses > 0 {
select {
case ret := <-producer.Errors():
t.Fatal(ret.Err)
case <-producer.Successes():
expectedResponses--
}
}
safeClose(t, producer)
// Validate producer metrics before using the consumer minus the offset request
validateMetrics(t, client)
master, err := NewConsumerFromClient(client)
if err != nil {
t.Fatal(err)
}
consumer, err := master.ConsumePartition("test.1", 0, initialOffset)
if err != nil {
t.Fatal(err)
}
for i := 1; i <= TestBatchSize; i++ {
select {
case <-time.After(10 * time.Second):
t.Fatal("Not received any more events in the last 10 seconds.")
case err := <-consumer.Errors():
t.Error(err)
case message := <-consumer.Messages():
if string(message.Value) != fmt.Sprintf("testing %d", i) {
t.Fatalf("Unexpected message with index %d: %s", i, message.Value)
}
}
}
safeClose(t, consumer)
safeClose(t, client)
}
func validateMetrics(t *testing.T, client Client) {
// Get the broker used by test1 topic
var broker *Broker
if partitions, err := client.Partitions("test.1"); err != nil {
t.Error(err)
} else {
for _, partition := range partitions {
if b, err := client.Leader("test.1", partition); err != nil {
t.Error(err)
} else {
if broker != nil && b != broker {
t.Fatal("Expected only one broker, got at least 2")
}
broker = b
}
}
}
metricValidators := newMetricValidators()
noResponse := client.Config().Producer.RequiredAcks == NoResponse
compressionEnabled := client.Config().Producer.Compression != CompressionNone
// We are adding 10ms of latency to all requests with toxiproxy
minRequestLatencyInMs := 10
if noResponse {
// but when we do not wait for a response it can be less than 1ms
minRequestLatencyInMs = 0
}
// We read at least 1 byte from the broker
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("incoming-byte-rate", 1))
// in at least 3 global requests (1 for metadata request, 1 for offset request and N for produce request)
metricValidators.register(minCountMeterValidator("request-rate", 3))
metricValidators.register(minCountHistogramValidator("request-size", 3))
metricValidators.register(minValHistogramValidator("request-size", 1))
metricValidators.register(minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
// and at least 2 requests to the registered broker (offset + produces)
metricValidators.registerForBroker(broker, minCountMeterValidator("request-rate", 2))
metricValidators.registerForBroker(broker, minCountHistogramValidator("request-size", 2))
metricValidators.registerForBroker(broker, minValHistogramValidator("request-size", 1))
metricValidators.registerForBroker(broker, minValHistogramValidator("request-latency-in-ms", minRequestLatencyInMs))
// We send at least 1 batch
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("batch-size", 1))
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("batch-size", 1))
if compressionEnabled {
// We record compression ratios between [0.50, 10.00] (50-1000 with a histogram) for at least one "fake" record
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("compression-ratio", 1))
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 50))
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 1000))
} else {
// We record compression ratios of 1.00 (100 with a histogram) for every TestBatchSize record
metricValidators.registerForGlobalAndTopic("test_1", countHistogramValidator("compression-ratio", TestBatchSize))
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("compression-ratio", 100))
metricValidators.registerForGlobalAndTopic("test_1", maxValHistogramValidator("compression-ratio", 100))
}
// We send exactly TestBatchSize messages
metricValidators.registerForGlobalAndTopic("test_1", countMeterValidator("record-send-rate", TestBatchSize))
// We send at least one record per request
metricValidators.registerForGlobalAndTopic("test_1", minCountHistogramValidator("records-per-request", 1))
metricValidators.registerForGlobalAndTopic("test_1", minValHistogramValidator("records-per-request", 1))
// We receive at least 1 byte from the broker
metricValidators.registerForAllBrokers(broker, minCountMeterValidator("outgoing-byte-rate", 1))
if noResponse {
// in exactly 2 global responses (metadata + offset)
metricValidators.register(countMeterValidator("response-rate", 2))
metricValidators.register(minCountHistogramValidator("response-size", 2))
metricValidators.register(minValHistogramValidator("response-size", 1))
// and exactly 1 offset response for the registered broker
metricValidators.registerForBroker(broker, countMeterValidator("response-rate", 1))
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 1))
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
} else {
// in at least 3 global responses (metadata + offset + produces)
metricValidators.register(minCountMeterValidator("response-rate", 3))
metricValidators.register(minCountHistogramValidator("response-size", 3))
metricValidators.register(minValHistogramValidator("response-size", 1))
// and at least 2 for the registered broker
metricValidators.registerForBroker(broker, minCountMeterValidator("response-rate", 2))
metricValidators.registerForBroker(broker, minCountHistogramValidator("response-size", 2))
metricValidators.registerForBroker(broker, minValHistogramValidator("response-size", 1))
}
// Run the validators
metricValidators.run(t, client.Config().MetricRegistry)
}
// Benchmarks
func BenchmarkProducerSmall(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128)))
}
func BenchmarkProducerMedium(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024)))
}
func BenchmarkProducerLarge(b *testing.B) {
benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192)))
}
func BenchmarkProducerSmallSinglePartition(b *testing.B) {
benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128)))
}
func BenchmarkProducerMediumSnappy(b *testing.B) {
conf := NewConfig()
conf.Producer.Compression = CompressionSnappy
benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024)))
}
func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) {
setupFunctionalTest(b)
defer teardownFunctionalTest(b)
metricsDisable := os.Getenv("METRICS_DISABLE")
if metricsDisable != "" {
previousUseNilMetrics := metrics.UseNilMetrics
Logger.Println("Disabling metrics using no-op implementation")
metrics.UseNilMetrics = true
// Restore previous setting
defer func() {
metrics.UseNilMetrics = previousUseNilMetrics
}()
}
producer, err := NewAsyncProducer(kafkaBrokers, conf)
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 1; i <= b.N; {
msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value}
select {
case producer.Input() <- msg:
i++
case ret := <-producer.Errors():
b.Fatal(ret.Err)
}
}
safeClose(b, producer)
}

View file

@@ -1,148 +0,0 @@
package sarama
import (
"log"
"math/rand"
"net"
"os"
"strconv"
"strings"
"testing"
"time"
toxiproxy "github.com/Shopify/toxiproxy/client"
)
const (
VagrantToxiproxy = "http://192.168.100.67:8474"
VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095"
VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185"
)
var (
kafkaAvailable, kafkaRequired bool
kafkaBrokers []string
proxyClient *toxiproxy.Client
Proxies map[string]*toxiproxy.Proxy
ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"}
KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"}
)
func init() {
if os.Getenv("DEBUG") == "true" {
Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
seed := time.Now().UTC().UnixNano()
if tmp := os.Getenv("TEST_SEED"); tmp != "" {
seed, _ = strconv.ParseInt(tmp, 0, 64)
}
Logger.Println("Using random seed:", seed)
rand.Seed(seed)
proxyAddr := os.Getenv("TOXIPROXY_ADDR")
if proxyAddr == "" {
proxyAddr = VagrantToxiproxy
}
proxyClient = toxiproxy.NewClient(proxyAddr)
kafkaPeers := os.Getenv("KAFKA_PEERS")
if kafkaPeers == "" {
kafkaPeers = VagrantKafkaPeers
}
kafkaBrokers = strings.Split(kafkaPeers, ",")
if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil {
if err = c.Close(); err == nil {
kafkaAvailable = true
}
}
kafkaRequired = os.Getenv("CI") != ""
}
func checkKafkaAvailability(t testing.TB) {
if !kafkaAvailable {
if kafkaRequired {
t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
} else {
t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0])
}
}
}
func checkKafkaVersion(t testing.TB, requiredVersion string) {
kafkaVersion := os.Getenv("KAFKA_VERSION")
if kafkaVersion == "" {
t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion)
} else {
available := parseKafkaVersion(kafkaVersion)
required := parseKafkaVersion(requiredVersion)
if !available.satisfies(required) {
t.Skipf("Kafka version %s is required for this test; you have %s. Skipping...", requiredVersion, kafkaVersion)
}
}
}
func resetProxies(t testing.TB) {
if err := proxyClient.ResetState(); err != nil {
t.Error(err)
}
Proxies = nil
}
func fetchProxies(t testing.TB) {
var err error
Proxies, err = proxyClient.Proxies()
if err != nil {
t.Fatal(err)
}
}
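// SaveProxy saves the state of the named toxiproxy proxy, failing the test if
// that does not succeed.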
func SaveProxy(t *testing.T, px string) {
if err := Proxies[px].Save(); err != nil {
t.Fatal(err)
}
}
func setupFunctionalTest(t testing.TB) {
checkKafkaAvailability(t)
resetProxies(t)
fetchProxies(t)
}
func teardownFunctionalTest(t testing.TB) {
resetProxies(t)
}
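// kafkaVersion holds the numeric components of a dotted Kafka version string,
// e.g. "0.10.2" becomes [0 10 2].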
type kafkaVersion []int
func (kv kafkaVersion) satisfies(other kafkaVersion) bool {
var ov int
for index, v := range kv {
if len(other) <= index {
ov = 0
} else {
ov = other[index]
}
if v < ov {
return false
} else if v > ov {
return true
}
}
return true
}
func parseKafkaVersion(version string) kafkaVersion {
numbers := strings.Split(version, ".")
result := make(kafkaVersion, 0, len(numbers))
for _, number := range numbers {
nr, _ := strconv.Atoi(number)
result = append(result, nr)
}
return result
}

View file

@@ -1,21 +0,0 @@
package sarama
import "testing"
var (
basicHeartbeatRequest = []byte{
0, 3, 'f', 'o', 'o', // Group ID
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 3, 'b', 'a', 'z', // Member ID
}
)
func TestHeartbeatRequest(t *testing.T) {
var request *HeartbeatRequest
request = new(HeartbeatRequest)
request.GroupId = "foo"
request.GenerationId = 66051
request.MemberId = "baz"
testRequest(t, "basic", request, basicHeartbeatRequest)
}

View file

@ -1,18 +0,0 @@
package sarama
import "testing"
var (
heartbeatResponseNoError = []byte{
0x00, 0x00}
)
func TestHeartbeatResponse(t *testing.T) {
var response *HeartbeatResponse
response = new(HeartbeatResponse)
testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0)
if response.Err != ErrNoError {
t.Error("Decoding error failed: no error expected but found", response.Err)
}
}

View file

@ -1,31 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
initProducerIDRequestNull = []byte{
255, 255,
0, 0, 0, 100,
}
initProducerIDRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 100,
}
)
func TestInitProducerIDRequest(t *testing.T) {
req := &InitProducerIDRequest{
TransactionTimeout: 100 * time.Millisecond,
}
testRequest(t, "null transaction id", req, initProducerIDRequestNull)
transactionID := "txn"
req.TransactionalID = &transactionID
testRequest(t, "transaction id", req, initProducerIDRequest)
}

View file

@ -1,37 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
initProducerIDResponse = []byte{
0, 0, 0, 100,
0, 0,
0, 0, 0, 0, 0, 0, 31, 64, // producerID = 8000
0, 0, // epoch
}
initProducerIDRequestError = []byte{
0, 0, 0, 100,
0, 51,
255, 255, 255, 255, 255, 255, 255, 255,
0, 0,
}
)
func TestInitProducerIDResponse(t *testing.T) {
resp := &InitProducerIDResponse{
ThrottleTime: 100 * time.Millisecond,
ProducerID: 8000,
ProducerEpoch: 0,
}
testResponse(t, "", resp, initProducerIDResponse)
resp.Err = ErrConcurrentTransactions
resp.ProducerID = -1
testResponse(t, "with error", resp, initProducerIDRequestError)
}

View file

@ -1,83 +0,0 @@
package sarama
import "testing"
var (
joinGroupRequestV0_NoProtocols = []byte{
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
0, 0, 0, 100, // Session timeout
0, 0, // Member ID
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
0, 0, 0, 0, // 0 protocol groups
}
joinGroupRequestV0_OneProtocol = []byte{
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
0, 0, 0, 100, // Session timeout
0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
0, 0, 0, 1, // 1 group protocol
0, 3, 'o', 'n', 'e', // Protocol name
0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
}
joinGroupRequestV1 = []byte{
0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
0, 0, 0, 100, // Session timeout
0, 0, 0, 200, // Rebalance timeout
0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
0, 0, 0, 1, // 1 group protocol
0, 3, 'o', 'n', 'e', // Protocol name
0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
}
)
func TestJoinGroupRequest(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.ProtocolType = "consumer"
testRequest(t, "V0: no protocols", request, joinGroupRequestV0_NoProtocols)
}
func TestJoinGroupRequestV0_OneProtocol(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol)
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
testRequestDecode(t, "V0: one protocol", request, packet)
}
func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
request := new(JoinGroupRequest)
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol)
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
testRequestDecode(t, "V0: one protocol", request, packet)
}
func TestJoinGroupRequestV1(t *testing.T) {
request := new(JoinGroupRequest)
request.Version = 1
request.GroupId = "TestGroup"
request.SessionTimeout = 100
request.RebalanceTimeout = 200
request.MemberId = "OneProtocol"
request.ProtocolType = "consumer"
request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
packet := testRequestEncode(t, "V1", request, joinGroupRequestV1)
request.GroupProtocols = make(map[string][]byte)
request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
testRequestDecode(t, "V1", request, packet)
}

View file

@ -1,172 +0,0 @@
package sarama
import (
"reflect"
"testing"
)
var (
joinGroupResponseV0_NoError = []byte{
0x00, 0x00, // No error
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
0, 3, 'f', 'o', 'o', // Leader ID
0, 3, 'b', 'a', 'r', // Member ID
0, 0, 0, 0, // No member info
}
joinGroupResponseV0_WithError = []byte{
0, 23, // Error: inconsistent group protocol
0x00, 0x00, 0x00, 0x00, // Generation ID
0, 0, // Protocol name chosen
0, 0, // Leader ID
0, 0, // Member ID
0, 0, 0, 0, // No member info
}
joinGroupResponseV0_Leader = []byte{
0x00, 0x00, // No error
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
0, 3, 'f', 'o', 'o', // Leader ID
0, 3, 'f', 'o', 'o', // Member ID == Leader ID
0, 0, 0, 1, // 1 member
0, 3, 'f', 'o', 'o', // Member ID
0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata
}
joinGroupResponseV1 = []byte{
0x00, 0x00, // No error
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
0, 3, 'f', 'o', 'o', // Leader ID
0, 3, 'b', 'a', 'r', // Member ID
0, 0, 0, 0, // No member info
}
joinGroupResponseV2 = []byte{
0, 0, 0, 100,
0x00, 0x00, // No error
0x00, 0x01, 0x02, 0x03, // Generation ID
0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
0, 3, 'f', 'o', 'o', // Leader ID
0, 3, 'b', 'a', 'r', // Member ID
0, 0, 0, 0, // No member info
}
)
func TestJoinGroupResponseV0(t *testing.T) {
var response *JoinGroupResponse
response = new(JoinGroupResponse)
testVersionDecodable(t, "no error", response, joinGroupResponseV0_NoError, 0)
if response.Err != ErrNoError {
t.Error("Decoding Err failed: no error expected but found", response.Err)
}
if response.GenerationId != 66051 {
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
}
if response.LeaderId != "foo" {
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
}
if response.MemberId != "bar" {
t.Error("Decoding MemberId failed, found:", response.MemberId)
}
if len(response.Members) != 0 {
t.Error("Decoding Members failed, found:", response.Members)
}
response = new(JoinGroupResponse)
testVersionDecodable(t, "with error", response, joinGroupResponseV0_WithError, 0)
if response.Err != ErrInconsistentGroupProtocol {
t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err)
}
if response.GenerationId != 0 {
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
}
if response.LeaderId != "" {
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
}
if response.MemberId != "" {
t.Error("Decoding MemberId failed, found:", response.MemberId)
}
if len(response.Members) != 0 {
t.Error("Decoding Members failed, found:", response.Members)
}
response = new(JoinGroupResponse)
testVersionDecodable(t, "leader", response, joinGroupResponseV0_Leader, 0)
if response.Err != ErrNoError {
t.Error("Decoding Err failed: ErrNoError expected but found", response.Err)
}
if response.GenerationId != 66051 {
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
}
if response.LeaderId != "foo" {
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
}
if response.MemberId != "foo" {
t.Error("Decoding MemberId failed, found:", response.MemberId)
}
if len(response.Members) != 1 {
t.Error("Decoding Members failed, found:", response.Members)
}
if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) {
t.Error("Decoding foo member failed, found:", response.Members["foo"])
}
}
func TestJoinGroupResponseV1(t *testing.T) {
response := new(JoinGroupResponse)
testVersionDecodable(t, "no error", response, joinGroupResponseV1, 1)
if response.Err != ErrNoError {
t.Error("Decoding Err failed: no error expected but found", response.Err)
}
if response.GenerationId != 66051 {
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
}
if response.GroupProtocol != "protocol" {
t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol)
}
if response.LeaderId != "foo" {
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
}
if response.MemberId != "bar" {
t.Error("Decoding MemberId failed, found:", response.MemberId)
}
if response.Version != 1 {
t.Error("Decoding Version failed, found:", response.Version)
}
if len(response.Members) != 0 {
t.Error("Decoding Members failed, found:", response.Members)
}
}
func TestJoinGroupResponseV2(t *testing.T) {
response := new(JoinGroupResponse)
testVersionDecodable(t, "no error", response, joinGroupResponseV2, 2)
if response.ThrottleTime != 100 {
t.Error("Decoding ThrottleTime failed, found:", response.ThrottleTime)
}
if response.Err != ErrNoError {
t.Error("Decoding Err failed: no error expected but found", response.Err)
}
if response.GenerationId != 66051 {
t.Error("Decoding GenerationId failed, found:", response.GenerationId)
}
if response.GroupProtocol != "protocol" {
t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol)
}
if response.LeaderId != "foo" {
t.Error("Decoding LeaderId failed, found:", response.LeaderId)
}
if response.MemberId != "bar" {
t.Error("Decoding MemberId failed, found:", response.MemberId)
}
if response.Version != 2 {
t.Error("Decoding Version failed, found:", response.Version)
}
if len(response.Members) != 0 {
t.Error("Decoding Members failed, found:", response.Members)
}
}

View file

@ -1,19 +0,0 @@
package sarama
import "testing"
var (
basicLeaveGroupRequest = []byte{
0, 3, 'f', 'o', 'o',
0, 3, 'b', 'a', 'r',
}
)
func TestLeaveGroupRequest(t *testing.T) {
var request *LeaveGroupRequest
request = new(LeaveGroupRequest)
request.GroupId = "foo"
request.MemberId = "bar"
testRequest(t, "basic", request, basicLeaveGroupRequest)
}

View file

@ -1,24 +0,0 @@
package sarama
import "testing"
var (
leaveGroupResponseNoError = []byte{0x00, 0x00}
leaveGroupResponseWithError = []byte{0, 25}
)
func TestLeaveGroupResponse(t *testing.T) {
var response *LeaveGroupResponse
response = new(LeaveGroupResponse)
testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0)
if response.Err != ErrNoError {
t.Error("Decoding error failed: no error expected but found", response.Err)
}
response = new(LeaveGroupResponse)
testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0)
if response.Err != ErrUnknownMemberId {
t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err)
}
}

View file

@ -1,7 +0,0 @@
package sarama
import "testing"
func TestListGroupsRequest(t *testing.T) {
testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{})
}

View file

@ -1,58 +0,0 @@
package sarama
import (
"testing"
)
var (
listGroupsResponseEmpty = []byte{
0, 0, // no error
0, 0, 0, 0, // no groups
}
listGroupsResponseError = []byte{
0, 31, // ErrClusterAuthorizationFailed
0, 0, 0, 0, // no groups
}
listGroupsResponseWithConsumer = []byte{
0, 0, // no error
0, 0, 0, 1, // 1 group
0, 3, 'f', 'o', 'o', // group name
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type
}
)
func TestListGroupsResponse(t *testing.T) {
var response *ListGroupsResponse
response = new(ListGroupsResponse)
testVersionDecodable(t, "no error", response, listGroupsResponseEmpty, 0)
if response.Err != ErrNoError {
t.Error("Expected no error, found:", response.Err)
}
if len(response.Groups) != 0 {
t.Error("Expected no groups")
}
response = new(ListGroupsResponse)
testVersionDecodable(t, "with error", response, listGroupsResponseError, 0)
if response.Err != ErrClusterAuthorizationFailed {
t.Error("Expected ErrClusterAuthorizationFailed, found:", response.Err)
}
if len(response.Groups) != 0 {
t.Error("Expected no groups")
}
response = new(ListGroupsResponse)
testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0)
if response.Err != ErrNoError {
t.Error("Expected no error, found:", response.Err)
}
if len(response.Groups) != 1 {
t.Error("Expected one group")
}
if response.Groups["foo"] != "consumer" {
t.Error("Expected foo group to use consumer protocol")
}
}

View file

@ -1,196 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
emptyMessage = []byte{
167, 236, 104, 3, // CRC
0x00, // magic version byte
0x00, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV1Message = []byte{
204, 47, 121, 217, // CRC
0x01, // magic version byte
0x00, // attribute flags
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timestamp
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyV2Message = []byte{
167, 236, 104, 3, // CRC
0x02, // magic version byte
0x00, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0xFF, 0xFF, 0xFF, 0xFF} // value
emptyGzipMessage = []byte{
132, 99, 80, 148, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
// value
0x00, 0x00, 0x00, 0x17,
0x1f, 0x8b,
0x08,
0, 0, 0, 0, 0, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
emptyLZ4Message = []byte{
132, 219, 238, 101, // CRC
0x01, // version byte
0x03, // attribute flags: lz4
0, 0, 1, 88, 141, 205, 89, 56, // timestamp
0xFF, 0xFF, 0xFF, 0xFF, // key
0x00, 0x00, 0x00, 0x0f, // len
0x04, 0x22, 0x4D, 0x18, // LZ4 magic number
100, // LZ4 flags: version 01, block independent, content checksum
112, 185, 0, 0, 0, 0, // LZ4 data
5, 93, 204, 2, // LZ4 checksum
}
emptyBulkSnappyMessage = []byte{
180, 47, 53, 209, //CRC
0x00, // magic version byte
0x02, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0, 0, 0, 42,
130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic
0, 0, 0, 1, // min version
0, 0, 0, 1, // default version
0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0}
emptyBulkGzipMessage = []byte{
139, 160, 63, 141, //CRC
0x00, // magic version byte
0x01, // attribute flags
0xFF, 0xFF, 0xFF, 0xFF, // key
0x00, 0x00, 0x00, 0x27, // len
0x1f, 0x8b, // Gzip Magic
0x08, // deflate compressed
0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0}
emptyBulkLZ4Message = []byte{
246, 12, 188, 129, // CRC
0x01, // Version
0x03, // attribute flags (LZ4)
255, 255, 249, 209, 212, 181, 73, 201, // timestamp
0xFF, 0xFF, 0xFF, 0xFF, // key
0x00, 0x00, 0x00, 0x47, // len
0x04, 0x22, 0x4D, 0x18, // magic number lz4
100, // lz4 flags 01100100
// version: 01, block indep: 1, block checksum: 0, content size: 0, content checksum: 1, reserved: 00
112, 185, 52, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 14, 121, 87, 72, 224, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0,
71, 129, 23, 111, // LZ4 checksum
}
)
func TestMessageEncoding(t *testing.T) {
message := Message{}
testEncodable(t, "empty", &message, emptyMessage)
message.Value = []byte{}
message.Codec = CompressionGZIP
testEncodable(t, "empty gzip", &message, emptyGzipMessage)
message.Value = []byte{}
message.Codec = CompressionLZ4
message.Timestamp = time.Unix(1479847795, 0)
message.Version = 1
testEncodable(t, "empty lz4", &message, emptyLZ4Message)
}
func TestMessageDecoding(t *testing.T) {
message := Message{}
testDecodable(t, "empty", &message, emptyMessage)
if message.Codec != CompressionNone {
t.Error("Decoding produced compression codec where there was none.")
}
if message.Key != nil {
t.Error("Decoding produced key where there was none.")
}
if message.Value != nil {
t.Error("Decoding produced value where there was none.")
}
if message.Set != nil {
t.Error("Decoding produced set where there was none.")
}
testDecodable(t, "empty gzip", &message, emptyGzipMessage)
if message.Codec != CompressionGZIP {
t.Error("Decoding produced incorrect compression codec (was gzip).")
}
if message.Key != nil {
t.Error("Decoding produced key where there was none.")
}
if message.Value == nil || len(message.Value) != 0 {
t.Error("Decoding produced nil or content-ful value where there was an empty array.")
}
}
func TestMessageDecodingBulkSnappy(t *testing.T) {
message := Message{}
testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage)
if message.Codec != CompressionSnappy {
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy)
}
if message.Key != nil {
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
}
if message.Set == nil {
t.Error("Decoding produced no set, but one was expected.")
} else if len(message.Set.Messages) != 2 {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
func TestMessageDecodingBulkGzip(t *testing.T) {
message := Message{}
testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage)
if message.Codec != CompressionGZIP {
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP)
}
if message.Key != nil {
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
}
if message.Set == nil {
t.Error("Decoding produced no set, but one was expected.")
} else if len(message.Set.Messages) != 2 {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
func TestMessageDecodingBulkLZ4(t *testing.T) {
message := Message{}
testDecodable(t, "bulk lz4", &message, emptyBulkLZ4Message)
if message.Codec != CompressionLZ4 {
t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionLZ4)
}
if message.Key != nil {
t.Errorf("Decoding produced key %+v, but none was expected.", message.Key)
}
if message.Set == nil {
t.Error("Decoding produced no set, but one was expected.")
} else if len(message.Set.Messages) != 2 {
t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages))
}
}
func TestMessageDecodingVersion1(t *testing.T) {
message := Message{Version: 1}
testDecodable(t, "decoding empty v1 message", &message, emptyV1Message)
}
func TestMessageDecodingUnknownVersions(t *testing.T) {
message := Message{Version: 2}
err := decode(emptyV2Message, &message)
if err == nil {
t.Error("Decoding did not produce an error for an unknown magic byte")
}
if err.Error() != "kafka: error decoding packet: unknown magic byte (2)" {
t.Error("Decoding an unknown magic byte produced an unknown error ", err)
}
}
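
The fixtures above cover the v0 and v1 message formats and the gzip, snappy and LZ4 codecs. As a hedged sketch of how a codec is normally selected through the public configuration (the minimum-version comment is an assumption, not taken from the removed file):

```go
package sarama

// Hedged sketch: producers select a codec via the configuration rather than
// by building Message values directly. CompressionGZIP, CompressionSnappy and
// CompressionLZ4 are the codecs exercised by the fixtures above; treating
// V0_10_0_0 as the point where v1 (timestamped) messages apply is an assumption.
func compressedProducerConfigSketch() *Config {
	config := NewConfig()
	config.Producer.Compression = CompressionGZIP // or CompressionSnappy / CompressionLZ4
	config.Version = V0_10_0_0                    // v1 message format carries a timestamp
	return config
}
```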

View file

@ -1,76 +0,0 @@
package sarama
import "testing"
var (
metadataRequestNoTopicsV0 = []byte{
0x00, 0x00, 0x00, 0x00}
metadataRequestOneTopicV0 = []byte{
0x00, 0x00, 0x00, 0x01,
0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}
metadataRequestThreeTopicsV0 = []byte{
0x00, 0x00, 0x00, 0x03,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x03, 'b', 'a', 'z'}
metadataRequestNoTopicsV1 = []byte{
0xff, 0xff, 0xff, 0xff}
metadataRequestAutoCreateV4 = append(metadataRequestOneTopicV0, byte(1))
metadataRequestNoAutoCreateV4 = append(metadataRequestOneTopicV0, byte(0))
)
func TestMetadataRequestV0(t *testing.T) {
request := new(MetadataRequest)
testRequest(t, "no topics", request, metadataRequestNoTopicsV0)
request.Topics = []string{"topic1"}
testRequest(t, "one topic", request, metadataRequestOneTopicV0)
request.Topics = []string{"foo", "bar", "baz"}
testRequest(t, "three topics", request, metadataRequestThreeTopicsV0)
}
func TestMetadataRequestV1(t *testing.T) {
request := new(MetadataRequest)
request.Version = 1
testRequest(t, "no topics", request, metadataRequestNoTopicsV1)
request.Topics = []string{"topic1"}
testRequest(t, "one topic", request, metadataRequestOneTopicV0)
request.Topics = []string{"foo", "bar", "baz"}
testRequest(t, "three topics", request, metadataRequestThreeTopicsV0)
}
func TestMetadataRequestV2(t *testing.T) {
request := new(MetadataRequest)
request.Version = 2
testRequest(t, "no topics", request, metadataRequestNoTopicsV1)
request.Topics = []string{"topic1"}
testRequest(t, "one topic", request, metadataRequestOneTopicV0)
}
func TestMetadataRequestV3(t *testing.T) {
request := new(MetadataRequest)
request.Version = 3
testRequest(t, "no topics", request, metadataRequestNoTopicsV1)
request.Topics = []string{"topic1"}
testRequest(t, "one topic", request, metadataRequestOneTopicV0)
}
func TestMetadataRequestV4(t *testing.T) {
request := new(MetadataRequest)
request.Version = 4
request.Topics = []string{"topic1"}
request.AllowAutoTopicCreation = true
testRequest(t, "one topic", request, metadataRequestAutoCreateV4)
request.AllowAutoTopicCreation = false
testRequest(t, "one topic", request, metadataRequestNoAutoCreateV4)
}

View file

@ -1,290 +0,0 @@
package sarama
import "testing"
var (
emptyMetadataResponseV0 = []byte{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
brokersNoTopicsMetadataResponseV0 = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0xab, 0xff,
0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
0x00, 0x00, 0x00, 0x33,
0x00, 0x01, 0x02, 0x03,
0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
0x00, 0x00, 0x01, 0x11,
0x00, 0x00, 0x00, 0x00}
topicsNoBrokersMetadataResponseV0 = []byte{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x04,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x07,
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x00}
brokersNoTopicsMetadataResponseV1 = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0xab, 0xff,
0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
0x00, 0x00, 0x00, 0x33,
0x00, 0x05, 'r', 'a', 'c', 'k', '0',
0x00, 0x01, 0x02, 0x03,
0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
0x00, 0x00, 0x01, 0x11,
0x00, 0x05, 'r', 'a', 'c', 'k', '1',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00}
topicsNoBrokersMetadataResponseV1 = []byte{
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x03, 'f', 'o', 'o',
0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x04,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x07,
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x03, 'b', 'a', 'r',
0x01,
0x00, 0x00, 0x00, 0x00}
noBrokersNoTopicsWithThrottleTimeAndClusterIDV3 = []byte{
0x00, 0x00, 0x00, 0x10,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00}
noBrokersOneTopicWithOfflineReplicasV5 = []byte{
0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd',
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00,
0x00, 0x03, 'f', 'o', 'o',
0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x04,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x07,
0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
}
)
func TestEmptyMetadataResponseV0(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "empty, V0", &response, emptyMetadataResponseV0, 0)
if len(response.Brokers) != 0 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
}
}
func TestMetadataResponseWithBrokersV0(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "brokers, no topics, V0", &response, brokersNoTopicsMetadataResponseV0, 0)
if len(response.Brokers) != 2 {
t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
}
if response.Brokers[0].id != 0xabff {
t.Error("Decoding produced invalid broker 0 id.")
}
if response.Brokers[0].addr != "localhost:51" {
t.Error("Decoding produced invalid broker 0 address.")
}
if response.Brokers[1].id != 0x010203 {
t.Error("Decoding produced invalid broker 1 id.")
}
if response.Brokers[1].addr != "google.com:273" {
t.Error("Decoding produced invalid broker 1 address.")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
}
}
func TestMetadataResponseWithTopicsV0(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "topics, no brokers, V0", &response, topicsNoBrokersMetadataResponseV0, 0)
if len(response.Brokers) != 0 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
}
if len(response.Topics) != 2 {
t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!")
}
if response.Topics[0].Err != ErrNoError {
t.Error("Decoding produced invalid topic 0 error.")
}
if response.Topics[0].Name != "foo" {
t.Error("Decoding produced invalid topic 0 name.")
}
if len(response.Topics[0].Partitions) != 1 {
t.Fatal("Decoding produced invalid partition count for topic 0.")
}
if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize {
t.Error("Decoding produced invalid topic 0 partition 0 error.")
}
if response.Topics[0].Partitions[0].ID != 0x01 {
t.Error("Decoding produced invalid topic 0 partition 0 id.")
}
if response.Topics[0].Partitions[0].Leader != 0x07 {
t.Error("Decoding produced invalid topic 0 partition 0 leader.")
}
if len(response.Topics[0].Partitions[0].Replicas) != 3 {
t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.")
}
for i := 0; i < 3; i++ {
if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) {
t.Error("Decoding produced invalid topic 0 partition 0 replica", i)
}
}
if len(response.Topics[0].Partitions[0].Isr) != 0 {
t.Error("Decoding produced invalid topic 0 partition 0 isr length.")
}
if response.Topics[1].Err != ErrNoError {
t.Error("Decoding produced invalid topic 1 error.")
}
if response.Topics[1].Name != "bar" {
t.Error("Decoding produced invalid topic 1 name.")
}
if len(response.Topics[1].Partitions) != 0 {
t.Error("Decoding produced invalid partition count for topic 1.")
}
}
func TestMetadataResponseWithBrokersV1(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "topics, V1", &response, brokersNoTopicsMetadataResponseV1, 1)
if len(response.Brokers) != 2 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were 2!")
}
if response.Brokers[0].rack == nil || *response.Brokers[0].rack != "rack0" {
t.Error("Decoding produced invalid broker 0 rack.")
}
if response.Brokers[1].rack == nil || *response.Brokers[1].rack != "rack1" {
t.Error("Decoding produced invalid broker 1 rack.")
}
if response.ControllerID != 1 {
t.Error("Decoding produced", response.ControllerID, "should have been 1!")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "topics where there were none!")
}
}
func TestMetadataResponseWithTopicsV1(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "topics, V1", &response, topicsNoBrokersMetadataResponseV1, 1)
if len(response.Brokers) != 0 {
t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
}
if response.ControllerID != 4 {
t.Error("Decoding produced", response.ControllerID, "should have been 4!")
}
if len(response.Topics) != 2 {
t.Error("Decoding produced", len(response.Topics), "topics where there were 2!")
}
if response.Topics[0].IsInternal {
t.Error("Decoding produced", response.Topics[0], "topic0 should have been false!")
}
if !response.Topics[1].IsInternal {
t.Error("Decoding produced", response.Topics[1], "topic1 should have been true!")
}
}
func TestMetadataResponseWithThrottleTime(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "no topics, no brokers, throttle time and cluster Id V3", &response, noBrokersNoTopicsWithThrottleTimeAndClusterIDV3, 3)
if response.ThrottleTimeMs != int32(16) {
t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 16!")
}
if len(response.Brokers) != 0 {
t.Error("Decoding produced", response.Brokers, "should have been 0!")
}
if response.ControllerID != int32(1) {
t.Error("Decoding produced", response.ControllerID, "should have been 1!")
}
if *response.ClusterID != "clusterId" {
t.Error("Decoding produced", response.ClusterID, "should have been clusterId!")
}
if len(response.Topics) != 0 {
t.Error("Decoding produced", len(response.Topics), "should have been 0!")
}
}
func TestMetadataResponseWithOfflineReplicasV5(t *testing.T) {
response := MetadataResponse{}
testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, noBrokersOneTopicWithOfflineReplicasV5, 5)
if response.ThrottleTimeMs != int32(5) {
t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 5!")
}
if len(response.Brokers) != 0 {
t.Error("Decoding produced", response.Brokers, "should have been 0!")
}
if response.ControllerID != int32(2) {
t.Error("Decoding produced", response.ControllerID, "should have been 2!")
}
if *response.ClusterID != "clusterId" {
t.Error("Decoding produced", response.ClusterID, "should have been clusterId!")
}
if len(response.Topics) != 1 {
t.Error("Decoding produced", len(response.Topics), "should have been 1!")
}
if len(response.Topics[0].Partitions[0].OfflineReplicas) != 1 {
t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 1!")
}
}

View file

@ -1,172 +0,0 @@
package sarama
import (
"testing"
"github.com/rcrowley/go-metrics"
)
func TestGetOrRegisterHistogram(t *testing.T) {
metricRegistry := metrics.NewRegistry()
histogram := getOrRegisterHistogram("name", metricRegistry)
if histogram == nil {
t.Error("Unexpected nil histogram")
}
// Fetch the metric
foundHistogram := metricRegistry.Get("name")
if foundHistogram != histogram {
t.Error("Unexpected different histogram", foundHistogram, histogram)
}
// Try to register the metric again
sameHistogram := getOrRegisterHistogram("name", metricRegistry)
if sameHistogram != histogram {
t.Error("Unexpected different histogram", sameHistogram, histogram)
}
}
func TestGetMetricNameForBroker(t *testing.T) {
metricName := getMetricNameForBroker("name", &Broker{id: 1})
if metricName != "name-for-broker-1" {
t.Error("Unexpected metric name", metricName)
}
}
// Common type and functions for metric validation
type metricValidator struct {
name string
validator func(*testing.T, interface{})
}
type metricValidators []*metricValidator
func newMetricValidators() metricValidators {
return make([]*metricValidator, 0, 32)
}
func (m *metricValidators) register(validator *metricValidator) {
*m = append(*m, validator)
}
func (m *metricValidators) registerForBroker(broker *Broker, validator *metricValidator) {
m.register(&metricValidator{getMetricNameForBroker(validator.name, broker), validator.validator})
}
func (m *metricValidators) registerForGlobalAndTopic(topic string, validator *metricValidator) {
m.register(&metricValidator{validator.name, validator.validator})
m.register(&metricValidator{getMetricNameForTopic(validator.name, topic), validator.validator})
}
func (m *metricValidators) registerForAllBrokers(broker *Broker, validator *metricValidator) {
m.register(validator)
m.registerForBroker(broker, validator)
}
func (m metricValidators) run(t *testing.T, r metrics.Registry) {
for _, metricValidator := range m {
metric := r.Get(metricValidator.name)
if metric == nil {
t.Error("No metric named", metricValidator.name)
} else {
metricValidator.validator(t, metric)
}
}
}
func meterValidator(name string, extraValidator func(*testing.T, metrics.Meter)) *metricValidator {
return &metricValidator{
name: name,
validator: func(t *testing.T, metric interface{}) {
if meter, ok := metric.(metrics.Meter); !ok {
t.Errorf("Expected meter metric for '%s', got %T", name, metric)
} else {
extraValidator(t, meter)
}
},
}
}
func countMeterValidator(name string, expectedCount int) *metricValidator {
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
count := meter.Count()
if count != int64(expectedCount) {
t.Errorf("Expected meter metric '%s' count = %d, got %d", name, expectedCount, count)
}
})
}
func minCountMeterValidator(name string, minCount int) *metricValidator {
return meterValidator(name, func(t *testing.T, meter metrics.Meter) {
count := meter.Count()
if count < int64(minCount) {
t.Errorf("Expected meter metric '%s' count >= %d, got %d", name, minCount, count)
}
})
}
func histogramValidator(name string, extraValidator func(*testing.T, metrics.Histogram)) *metricValidator {
return &metricValidator{
name: name,
validator: func(t *testing.T, metric interface{}) {
if histogram, ok := metric.(metrics.Histogram); !ok {
t.Errorf("Expected histogram metric for '%s', got %T", name, metric)
} else {
extraValidator(t, histogram)
}
},
}
}
func countHistogramValidator(name string, expectedCount int) *metricValidator {
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
count := histogram.Count()
if count != int64(expectedCount) {
t.Errorf("Expected histogram metric '%s' count = %d, got %d", name, expectedCount, count)
}
})
}
func minCountHistogramValidator(name string, minCount int) *metricValidator {
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
count := histogram.Count()
if count < int64(minCount) {
t.Errorf("Expected histogram metric '%s' count >= %d, got %d", name, minCount, count)
}
})
}
func minMaxHistogramValidator(name string, expectedMin int, expectedMax int) *metricValidator {
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
min := int(histogram.Min())
if min != expectedMin {
t.Errorf("Expected histogram metric '%s' min = %d, got %d", name, expectedMin, min)
}
max := int(histogram.Max())
if max != expectedMax {
t.Errorf("Expected histogram metric '%s' max = %d, got %d", name, expectedMax, max)
}
})
}
func minValHistogramValidator(name string, minMin int) *metricValidator {
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
min := int(histogram.Min())
if min < minMin {
t.Errorf("Expected histogram metric '%s' min >= %d, got %d", name, minMin, min)
}
})
}
func maxValHistogramValidator(name string, maxMax int) *metricValidator {
return histogramValidator(name, func(t *testing.T, histogram metrics.Histogram) {
max := int(histogram.Max())
if max > maxMax {
t.Errorf("Expected histogram metric '%s' max <= %d, got %d", name, maxMax, max)
}
})
}
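
For orientation, a hypothetical sketch (not in the removed file) of how the validator helpers above compose: register a metric, describe the expected counts, then evaluate every expectation against a go-metrics registry.

```go
package sarama

import (
	"testing"

	"github.com/rcrowley/go-metrics"
)

// Hypothetical sketch: mark a meter three times, then assert on its count
// with the countMeterValidator/minCountMeterValidator helpers defined above.
func runMetricValidatorSketch(t *testing.T) {
	registry := metrics.NewRegistry()
	metrics.GetOrRegisterMeter("request-rate", registry).Mark(3)

	validators := newMetricValidators()
	validators.register(countMeterValidator("request-rate", 3))
	validators.register(minCountMeterValidator("request-rate", 1))
	validators.run(t, registry)
}
```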

View file

@ -1,90 +0,0 @@
package sarama
import "testing"
var (
offsetCommitRequestNoBlocksV0 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestNoBlocksV1 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestNoBlocksV2 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
0x00, 0x00, 0x00, 0x00}
offsetCommitRequestOneBlockV0 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
offsetCommitRequestOneBlockV1 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
offsetCommitRequestOneBlockV2 = []byte{
0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r',
0x00, 0x00, 0x11, 0x22,
0x00, 0x04, 'c', 'o', 'n', 's',
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x52, 0x21,
0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF,
0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'}
)
func TestOffsetCommitRequestV0(t *testing.T) {
request := new(OffsetCommitRequest)
request.Version = 0
request.ConsumerGroup = "foobar"
testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0)
}
func TestOffsetCommitRequestV1(t *testing.T) {
request := new(OffsetCommitRequest)
request.ConsumerGroup = "foobar"
request.ConsumerID = "cons"
request.ConsumerGroupGeneration = 0x1122
request.Version = 1
testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata")
testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1)
}
func TestOffsetCommitRequestV2(t *testing.T) {
request := new(OffsetCommitRequest)
request.ConsumerGroup = "foobar"
request.ConsumerID = "cons"
request.ConsumerGroupGeneration = 0x1122
request.RetentionTime = 0x4433
request.Version = 2
testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2)
request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata")
testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2)
}

View file

@ -1,24 +0,0 @@
package sarama
import (
"testing"
)
var (
emptyOffsetCommitResponse = []byte{
0x00, 0x00, 0x00, 0x00}
)
func TestEmptyOffsetCommitResponse(t *testing.T) {
response := OffsetCommitResponse{}
testResponse(t, "empty", &response, emptyOffsetCommitResponse)
}
func TestNormalOffsetCommitResponse(t *testing.T) {
response := OffsetCommitResponse{}
response.AddError("t", 0, ErrNotLeaderForPartition)
response.Errors["m"] = make(map[int32]KError)
// The response encoded form cannot be checked for it varies due to
// unpredictable map traversal order.
testResponse(t, "normal", &response, nil)
}

View file

@ -1,31 +0,0 @@
package sarama
import "testing"
var (
offsetFetchRequestNoGroupNoPartitions = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
offsetFetchRequestNoPartitions = []byte{
0x00, 0x04, 'b', 'l', 'a', 'h',
0x00, 0x00, 0x00, 0x00}
offsetFetchRequestOnePartition = []byte{
0x00, 0x04, 'b', 'l', 'a', 'h',
0x00, 0x00, 0x00, 0x01,
0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't',
0x00, 0x00, 0x00, 0x01,
0x4F, 0x4F, 0x4F, 0x4F}
)
func TestOffsetFetchRequest(t *testing.T) {
request := new(OffsetFetchRequest)
testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions)
request.ConsumerGroup = "blah"
testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions)
request.AddPartition("topicTheFirst", 0x4F4F4F4F)
testRequest(t, "one partition", request, offsetFetchRequestOnePartition)
}

View file

@ -1,22 +0,0 @@
package sarama
import "testing"
var (
emptyOffsetFetchResponse = []byte{
0x00, 0x00, 0x00, 0x00}
)
func TestEmptyOffsetFetchResponse(t *testing.T) {
response := OffsetFetchResponse{}
testResponse(t, "empty", &response, emptyOffsetFetchResponse)
}
func TestNormalOffsetFetchResponse(t *testing.T) {
response := OffsetFetchResponse{}
response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut})
response.Blocks["m"] = nil
// The response encoded form cannot be checked for it varies due to
// unpredictable map traversal order.
testResponse(t, "normal", &response, nil)
}

View file

@ -1,433 +0,0 @@
package sarama
import (
"testing"
"time"
)
func initOffsetManager(t *testing.T) (om OffsetManager,
testClient Client, broker, coordinator *MockBroker) {
config := NewConfig()
config.Metadata.Retry.Max = 1
config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond
config.Version = V0_9_0_0
broker = NewMockBroker(t, 1)
coordinator = NewMockBroker(t, 2)
seedMeta := new(MetadataResponse)
seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID())
seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError)
seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError)
broker.Returns(seedMeta)
var err error
testClient, err = NewClient([]string{broker.Addr()}, config)
if err != nil {
t.Fatal(err)
}
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: coordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: coordinator.Port(),
})
om, err = NewOffsetManagerFromClient("group", testClient)
if err != nil {
t.Fatal(err)
}
return om, testClient, broker, coordinator
}
func initPartitionOffsetManager(t *testing.T, om OffsetManager,
coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager {
fetchResponse := new(OffsetFetchResponse)
fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{
Err: ErrNoError,
Offset: initialOffset,
Metadata: metadata,
})
coordinator.Returns(fetchResponse)
pom, err := om.ManagePartition("my_topic", 0)
if err != nil {
t.Fatal(err)
}
return pom
}
func TestNewOffsetManager(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
seedBroker.Returns(new(MetadataResponse))
testClient, err := NewClient([]string{seedBroker.Addr()}, nil)
if err != nil {
t.Fatal(err)
}
_, err = NewOffsetManagerFromClient("group", testClient)
if err != nil {
t.Error(err)
}
safeClose(t, testClient)
_, err = NewOffsetManagerFromClient("group", testClient)
if err != ErrClosedClient {
t.Errorf("Error expected for closed client; actual value: %v", err)
}
seedBroker.Close()
}
// Test recovery from ErrNotCoordinatorForConsumer
// on first fetchInitialOffset call
func TestOffsetManagerFetchInitialFail(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
// Error on first fetchInitialOffset call
responseBlock := OffsetFetchResponseBlock{
Err: ErrNotCoordinatorForConsumer,
Offset: 5,
Metadata: "test_meta",
}
fetchResponse := new(OffsetFetchResponse)
fetchResponse.AddBlock("my_topic", 0, &responseBlock)
coordinator.Returns(fetchResponse)
// Refresh coordinator
newCoordinator := NewMockBroker(t, 3)
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
// Second fetchInitialOffset call is fine
fetchResponse2 := new(OffsetFetchResponse)
responseBlock2 := responseBlock
responseBlock2.Err = ErrNoError
fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
newCoordinator.Returns(fetchResponse2)
pom, err := om.ManagePartition("my_topic", 0)
if err != nil {
t.Error(err)
}
broker.Close()
coordinator.Close()
newCoordinator.Close()
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
}
// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress
func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
// Error on first fetchInitialOffset call
responseBlock := OffsetFetchResponseBlock{
Err: ErrOffsetsLoadInProgress,
Offset: 5,
Metadata: "test_meta",
}
fetchResponse := new(OffsetFetchResponse)
fetchResponse.AddBlock("my_topic", 0, &responseBlock)
coordinator.Returns(fetchResponse)
// Second fetchInitialOffset call is fine
fetchResponse2 := new(OffsetFetchResponse)
responseBlock2 := responseBlock
responseBlock2.Err = ErrNoError
fetchResponse2.AddBlock("my_topic", 0, &responseBlock2)
coordinator.Returns(fetchResponse2)
pom, err := om.ManagePartition("my_topic", 0)
if err != nil {
t.Error(err)
}
broker.Close()
coordinator.Close()
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
}
func TestPartitionOffsetManagerInitialOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
testClient.Config().Consumer.Offsets.Initial = OffsetOldest
// Kafka returns -1 if no offset has been stored for this partition yet.
pom := initPartitionOffsetManager(t, om, coordinator, -1, "")
offset, meta := pom.NextOffset()
if offset != OffsetOldest {
t.Errorf("Expected offset %v. Actual: %v", OffsetOldest, offset)
}
if meta != "" {
t.Errorf("Expected metadata to be empty. Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
broker.Close()
coordinator.Close()
safeClose(t, testClient)
}
func TestPartitionOffsetManagerNextOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta")
offset, meta := pom.NextOffset()
if offset != 5 {
t.Errorf("Expected offset 5. Actual: %v", offset)
}
if meta != "test_meta" {
t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
broker.Close()
coordinator.Close()
safeClose(t, testClient)
}
func TestPartitionOffsetManagerResetOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
coordinator.Returns(ocResponse)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
testClient.Config().Consumer.Offsets.Retention = time.Hour
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
handler := func(req *request) (res encoder) {
if req.body.version() != 2 {
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
}
offsetCommitRequest := req.body.(*OffsetCommitRequest)
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
}
return ocResponse
}
coordinator.setHandler(handler)
expected := int64(1)
pom.ResetOffset(expected, "modified_meta")
actual, meta := pom.NextOffset()
if actual != expected {
t.Errorf("Expected offset %v. Actual: %v", expected, actual)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerMarkOffset(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
coordinator.Returns(ocResponse)
pom.MarkOffset(100, "modified_meta")
offset, meta := pom.NextOffset()
if offset != 100 {
t.Errorf("Expected offset 100. Actual: %v", offset)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
testClient.Config().Consumer.Offsets.Retention = time.Hour
pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
handler := func(req *request) (res encoder) {
if req.body.version() != 2 {
t.Errorf("Expected to be using version 2. Actual: %v", req.body.version())
}
offsetCommitRequest := req.body.(*OffsetCommitRequest)
if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) {
t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime)
}
return ocResponse
}
coordinator.setHandler(handler)
pom.MarkOffset(100, "modified_meta")
offset, meta := pom.NextOffset()
if offset != 100 {
t.Errorf("Expected offset 100. Actual: %v", offset)
}
if meta != "modified_meta" {
t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta)
}
safeClose(t, pom)
safeClose(t, om)
safeClose(t, testClient)
broker.Close()
coordinator.Close()
}
func TestPartitionOffsetManagerCommitErr(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
// Error on one partition
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange)
ocResponse.AddError("my_topic", 1, ErrNoError)
coordinator.Returns(ocResponse)
newCoordinator := NewMockBroker(t, 3)
// For RefreshCoordinator()
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
// Nothing in response.Errors at all
ocResponse2 := new(OffsetCommitResponse)
newCoordinator.Returns(ocResponse2)
// For RefreshCoordinator()
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
// Error on the wrong partition for this pom
ocResponse3 := new(OffsetCommitResponse)
ocResponse3.AddError("my_topic", 1, ErrNoError)
newCoordinator.Returns(ocResponse3)
// For RefreshCoordinator()
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
// ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block
ocResponse4 := new(OffsetCommitResponse)
ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition)
newCoordinator.Returns(ocResponse4)
// For RefreshCoordinator()
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
// Normal error response
ocResponse5 := new(OffsetCommitResponse)
ocResponse5.AddError("my_topic", 0, ErrNoError)
newCoordinator.Returns(ocResponse5)
pom.MarkOffset(100, "modified_meta")
err := pom.Close()
if err != nil {
t.Error(err)
}
broker.Close()
coordinator.Close()
newCoordinator.Close()
safeClose(t, om)
safeClose(t, testClient)
}
// Test of recovery from abort
func TestAbortPartitionOffsetManager(t *testing.T) {
om, testClient, broker, coordinator := initOffsetManager(t)
pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta")
// this triggers an error in the CommitOffset request,
// which leads to the abort call
coordinator.Close()
// Response to refresh coordinator request
newCoordinator := NewMockBroker(t, 3)
broker.Returns(&ConsumerMetadataResponse{
CoordinatorID: newCoordinator.BrokerID(),
CoordinatorHost: "127.0.0.1",
CoordinatorPort: newCoordinator.Port(),
})
ocResponse := new(OffsetCommitResponse)
ocResponse.AddError("my_topic", 0, ErrNoError)
newCoordinator.Returns(ocResponse)
pom.MarkOffset(100, "modified_meta")
safeClose(t, pom)
safeClose(t, om)
broker.Close()
safeClose(t, testClient)
}
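
Finally, a hedged sketch of the public offset-management API these removed tests exercise; the helper name, topic and error handling are illustrative, not taken from the original file.

```go
package sarama

// Illustrative sketch: resume from the committed offset of one partition and
// record progress through the offset manager (mirrors the calls in the tests above).
func offsetManagerSketch(client Client) error {
	om, err := NewOffsetManagerFromClient("group", client)
	if err != nil {
		return err
	}
	defer om.Close()

	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		return err
	}
	defer pom.Close()

	nextOffset, metadata := pom.NextOffset() // where to resume consuming
	_ = metadata
	// After handling the message at nextOffset, mark the following offset as
	// the next one to read, as the MarkOffset tests above do.
	pom.MarkOffset(nextOffset+1, "")
	return nil
}
```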

View file

@ -1,43 +0,0 @@
package sarama
import "testing"
var (
offsetRequestNoBlocks = []byte{
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x00}
offsetRequestOneBlock = []byte{
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02}
offsetRequestOneBlockV1 = []byte{
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'b', 'a', 'r',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
)
func TestOffsetRequest(t *testing.T) {
request := new(OffsetRequest)
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
request.AddBlock("foo", 4, 1, 2)
testRequest(t, "one block", request, offsetRequestOneBlock)
}
func TestOffsetRequestV1(t *testing.T) {
request := new(OffsetRequest)
request.Version = 1
testRequest(t, "no blocks", request, offsetRequestNoBlocks)
request.AddBlock("bar", 4, 1, 2) // Last argument is ignored for V1
testRequest(t, "one block", request, offsetRequestOneBlockV1)
}

View file

@ -1,111 +0,0 @@
package sarama
import "testing"
var (
emptyOffsetResponse = []byte{
0x00, 0x00, 0x00, 0x00}
normalOffsetResponse = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x01, 'a',
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 'z',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
normalOffsetResponseV1 = []byte{
0x00, 0x00, 0x00, 0x02,
0x00, 0x01, 'a',
0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 'z',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00,
0x00, 0x00, 0x01, 0x58, 0x1A, 0xE6, 0x48, 0x86,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}
)
func TestEmptyOffsetResponse(t *testing.T) {
response := OffsetResponse{}
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
}
response = OffsetResponse{}
testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 1)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none.")
}
}
func TestNormalOffsetResponse(t *testing.T) {
response := OffsetResponse{}
testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0)
if len(response.Blocks) != 2 {
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
}
if len(response.Blocks["a"]) != 0 {
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
}
if len(response.Blocks["z"]) != 1 {
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
}
if response.Blocks["z"][2].Err != ErrNoError {
t.Fatal("Decoding produced invalid error for topic z partition 2.")
}
if len(response.Blocks["z"][2].Offsets) != 2 {
t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.")
}
if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 {
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
}
}
func TestNormalOffsetResponseV1(t *testing.T) {
response := OffsetResponse{}
testVersionDecodable(t, "normal", &response, normalOffsetResponseV1, 1)
if len(response.Blocks) != 2 {
t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.")
}
if len(response.Blocks["a"]) != 0 {
t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.")
}
if len(response.Blocks["z"]) != 1 {
t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.")
}
if response.Blocks["z"][2].Err != ErrNoError {
t.Fatal("Decoding produced invalid error for topic z partition 2.")
}
if response.Blocks["z"][2].Timestamp != 1477920049286 {
t.Fatal("Decoding produced invalid timestamp for topic z partition 2.", response.Blocks["z"][2].Timestamp)
}
if response.Blocks["z"][2].Offset != 6 {
t.Fatal("Decoding produced invalid offsets for topic z partition 2.")
}
}

View file

@ -1,265 +0,0 @@
package sarama
import (
"crypto/rand"
"hash/fnv"
"log"
"testing"
)
func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) {
choice, err := partitioner.Partition(message, numPartitions)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= numPartitions {
t.Error(partitioner, "returned partition", choice, "outside of range for", message)
}
for i := 1; i < 50; i++ {
newChoice, err := partitioner.Partition(message, numPartitions)
if err != nil {
t.Error(partitioner, err)
}
if newChoice != choice {
t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".")
}
}
}
func TestRandomPartitioner(t *testing.T) {
partitioner := NewRandomPartitioner("mytopic")
choice, err := partitioner.Partition(nil, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(nil, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range.")
}
}
}
func TestRoundRobinPartitioner(t *testing.T) {
partitioner := NewRoundRobinPartitioner("mytopic")
choice, err := partitioner.Partition(nil, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
var i int32
for i = 1; i < 50; i++ {
choice, err := partitioner.Partition(nil, 7)
if err != nil {
t.Error(partitioner, err)
}
if choice != i%7 {
t.Error("Returned partition", choice, "expecting", i%7)
}
}
}
func TestNewHashPartitionerWithHasher(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
buf := make([]byte, 256)
for i := 1; i < 50; i++ {
if _, err := rand.Read(buf); err != nil {
t.Error(err)
}
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
}
}
func TestHashPartitionerWithHasherMinInt32(t *testing.T) {
// use the current default hasher fnv.New32a()
partitioner := NewCustomHashPartitioner(fnv.New32a)("mytopic")
msg := ProducerMessage{}
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
// which is -2147483648 or int32's min value
msg.Key = StringEncoder("1468509572224")
choice, err := partitioner.Partition(&msg, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
func TestHashPartitioner(t *testing.T) {
partitioner := NewHashPartitioner("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := 1; i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
buf := make([]byte, 256)
for i := 1; i < 50; i++ {
if _, err := rand.Read(buf); err != nil {
t.Error(err)
}
assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50)
}
}
func TestHashPartitionerMinInt32(t *testing.T) {
partitioner := NewHashPartitioner("mytopic")
msg := ProducerMessage{}
// "1468509572224" generates 2147483648 (uint32) result from Sum32 function
// which is -2147483648 or int32's min value
msg.Key = StringEncoder("1468509572224")
choice, err := partitioner.Partition(&msg, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice < 0 || choice >= 50 {
t.Error("Returned partition", choice, "outside of range for nil key.")
}
}
func TestManualPartitioner(t *testing.T) {
partitioner := NewManualPartitioner("mytopic")
choice, err := partitioner.Partition(&ProducerMessage{}, 1)
if err != nil {
t.Error(partitioner, err)
}
if choice != 0 {
t.Error("Returned non-zero partition when only one available.")
}
for i := int32(1); i < 50; i++ {
choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50)
if err != nil {
t.Error(partitioner, err)
}
if choice != i {
t.Error("Returned partition not the same as the input partition")
}
}
}
// By default, Sarama uses the message's key to consistently assign a partition to
// a message using hashing. If no key is set, a random partition will be chosen.
// This example shows how you can partition messages randomly, even when a key is set,
// by overriding Config.Producer.Partitioner.
func ExamplePartitioner_random() {
config := NewConfig()
config.Producer.Partitioner = NewRandomPartitioner
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Println("Failed to close producer:", err)
}
}()
msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Fatalln("Failed to produce message to kafka cluster.")
}
log.Printf("Produced message to partition %d with offset %d", partition, offset)
}
// This example shows how to assign partitions to your messages manually.
func ExamplePartitioner_manual() {
config := NewConfig()
// First, we tell the producer that we are going to partition ourselves.
config.Producer.Partitioner = NewManualPartitioner
producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
if err != nil {
log.Fatal(err)
}
defer func() {
if err := producer.Close(); err != nil {
log.Println("Failed to close producer:", err)
}
}()
// Now, we set the Partition field of the ProducerMessage struct.
msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")}
partition, offset, err := producer.SendMessage(msg)
if err != nil {
log.Fatalln("Failed to produce message to kafka cluster.")
}
if partition != 6 {
log.Fatal("Message should have been produced to partition 6!")
}
log.Printf("Produced message to partition %d with offset %d", partition, offset)
}
// This example shows how to set a different partitioner depending on the topic.
func ExamplePartitioner_per_topic() {
config := NewConfig()
config.Producer.Partitioner = func(topic string) Partitioner {
switch topic {
case "access_log", "error_log":
return NewRandomPartitioner(topic)
default:
return NewHashPartitioner(topic)
}
}
// ...
}

View file

@@ -1,106 +0,0 @@
package sarama
import (
"testing"
"time"
)
var (
produceRequestEmpty = []byte{
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00}
produceRequestHeader = []byte{
0x01, 0x23,
0x00, 0x00, 0x04, 0x44,
0x00, 0x00, 0x00, 0x00}
produceRequestOneMessage = []byte{
0x01, 0x23,
0x00, 0x00, 0x04, 0x44,
0x00, 0x00, 0x00, 0x01,
0x00, 0x05, 't', 'o', 'p', 'i', 'c',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0xAD,
0x00, 0x00, 0x00, 0x1C,
// messageSet
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x10,
// message
0x23, 0x96, 0x4a, 0xf7, // CRC
0x00,
0x00,
0xFF, 0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00, 0x02, 0x00, 0xEE}
produceRequestOneRecord = []byte{
0xFF, 0xFF, // Transaction ID
0x01, 0x23, // Required Acks
0x00, 0x00, 0x04, 0x44, // Timeout
0x00, 0x00, 0x00, 0x01, // Number of Topics
0x00, 0x05, 't', 'o', 'p', 'i', 'c', // Topic
0x00, 0x00, 0x00, 0x01, // Number of Partitions
0x00, 0x00, 0x00, 0xAD, // Partition
0x00, 0x00, 0x00, 0x52, // Records length
// recordBatch
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x46,
0x00, 0x00, 0x00, 0x00,
0x02,
0xCA, 0x33, 0xBC, 0x05,
0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
// record
0x28,
0x00,
0x0A,
0x00,
0x08, 0x01, 0x02, 0x03, 0x04,
0x06, 0x05, 0x06, 0x07,
0x02,
0x06, 0x08, 0x09, 0x0A,
0x04, 0x0B, 0x0C,
}
)
func TestProduceRequest(t *testing.T) {
request := new(ProduceRequest)
testRequest(t, "empty", request, produceRequestEmpty)
request.RequiredAcks = 0x123
request.Timeout = 0x444
testRequest(t, "header", request, produceRequestHeader)
request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}})
testRequest(t, "one message", request, produceRequestOneMessage)
request.Version = 3
batch := &RecordBatch{
LastOffsetDelta: 1,
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{0x01, 0x02, 0x03, 0x04},
Value: []byte{0x05, 0x06, 0x07},
Headers: []*RecordHeader{{
Key: []byte{0x08, 0x09, 0x0A},
Value: []byte{0x0B, 0x0C},
}},
}},
}
request.AddBatch("topic", 0xAD, batch)
packet := testRequestEncode(t, "one record", request, produceRequestOneRecord)
// compressedRecords field is not populated on decoding because consumers
// are only interested in decoded records.
batch.compressedRecords = nil
testRequestDecode(t, "one record", request, packet)
}

View file

@@ -1,128 +0,0 @@
package sarama
import (
"fmt"
"testing"
"time"
)
var (
produceResponseNoBlocksV0 = []byte{
0x00, 0x00, 0x00, 0x00}
produceResponseManyBlocksVersions = [][]byte{
{
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
}, {
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
}, {
0x00, 0x00, 0x00, 0x01,
0x00, 0x03, 'f', 'o', 'o',
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x01, // Partition 1
0x00, 0x02, // ErrInvalidMessage
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, // Offset 255
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE8, // Timestamp January 1st 1970 at 00:00:01.000 UTC (LogAppendTime was used)
0x00, 0x00, 0x00, 0x64, // 100 ms throttle time
},
}
)
func TestProduceResponseDecode(t *testing.T) {
response := ProduceResponse{}
testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocksV0, 0)
if len(response.Blocks) != 0 {
t.Error("Decoding produced", len(response.Blocks), "topics where there were none")
}
for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
t.Logf("Decoding produceResponseManyBlocks version %d", v)
testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, int16(v))
if len(response.Blocks) != 1 {
t.Error("Decoding produced", len(response.Blocks), "topics where there was 1")
}
if len(response.Blocks["foo"]) != 1 {
t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there was one")
}
block := response.GetBlock("foo", 1)
if block == nil {
t.Error("Decoding did not produce a block for foo/1")
} else {
if block.Err != ErrInvalidMessage {
t.Error("Decoding failed for foo/2/Err, got:", int16(block.Err))
}
if block.Offset != 255 {
t.Error("Decoding failed for foo/1/Offset, got:", block.Offset)
}
if v >= 2 {
if block.Timestamp != time.Unix(1, 0) {
t.Error("Decoding failed for foo/2/Timestamp, got:", block.Timestamp)
}
}
}
if v >= 1 {
if expected := 100 * time.Millisecond; response.ThrottleTime != expected {
t.Error("Failed decoding produced throttle time, expected:", expected, ", got:", response.ThrottleTime)
}
}
}
}
func TestProduceResponseEncode(t *testing.T) {
response := ProduceResponse{}
response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
testEncodable(t, "empty", &response, produceResponseNoBlocksV0)
response.Blocks["foo"] = make(map[int32]*ProduceResponseBlock)
response.Blocks["foo"][1] = &ProduceResponseBlock{
Err: ErrInvalidMessage,
Offset: 255,
Timestamp: time.Unix(1, 0),
}
response.ThrottleTime = 100 * time.Millisecond
for v, produceResponseManyBlocks := range produceResponseManyBlocksVersions {
response.Version = int16(v)
testEncodable(t, fmt.Sprintf("many blocks version %d", v), &response, produceResponseManyBlocks)
}
}
func TestProduceResponseEncodeInvalidTimestamp(t *testing.T) {
response := ProduceResponse{}
response.Version = 2
response.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
response.Blocks["t"] = make(map[int32]*ProduceResponseBlock)
response.Blocks["t"][0] = &ProduceResponseBlock{
Err: ErrNoError,
Offset: 0,
// Use a timestamp before Unix time
Timestamp: time.Unix(0, 0).Add(-1 * time.Millisecond),
}
response.ThrottleTime = 100 * time.Millisecond
_, err := encode(&response, nil)
if err == nil {
t.Error("Expecting error, got nil")
}
if _, ok := err.(PacketEncodingError); !ok {
t.Error("Expecting PacketEncodingError, got:", err)
}
}

View file

@@ -1,255 +0,0 @@
package sarama
import (
"fmt"
"testing"
"time"
)
func makeProduceSet() (*asyncProducer, *produceSet) {
parent := &asyncProducer{
conf: NewConfig(),
}
return parent, newProduceSet(parent)
}
func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) {
if err := ps.add(msg); err != nil {
t.Error(err)
}
}
func TestProduceSetInitial(t *testing.T) {
_, ps := makeProduceSet()
if !ps.empty() {
t.Error("New produceSet should be empty")
}
if ps.readyToFlush() {
t.Error("Empty produceSet must never be ready to flush")
}
}
func TestProduceSetAddingMessages(t *testing.T) {
parent, ps := makeProduceSet()
parent.conf.Producer.Flush.MaxMessages = 1000
msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)}
safeAddMessage(t, ps, msg)
if ps.empty() {
t.Error("set shouldn't be empty when a message is added")
}
if !ps.readyToFlush() {
t.Error("by default set should be ready to flush when any message is in place")
}
for i := 0; i < 999; i++ {
if ps.wouldOverflow(msg) {
t.Error("set shouldn't fill up after only", i+1, "messages")
}
safeAddMessage(t, ps, msg)
}
if !ps.wouldOverflow(msg) {
t.Error("set should be full after 1000 messages")
}
}
func TestProduceSetPartitionTracking(t *testing.T) {
_, ps := makeProduceSet()
m1 := &ProducerMessage{Topic: "t1", Partition: 0}
m2 := &ProducerMessage{Topic: "t1", Partition: 1}
m3 := &ProducerMessage{Topic: "t2", Partition: 0}
safeAddMessage(t, ps, m1)
safeAddMessage(t, ps, m2)
safeAddMessage(t, ps, m3)
seenT1P0 := false
seenT1P1 := false
seenT2P0 := false
ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
if len(msgs) != 1 {
t.Error("Wrong message count")
}
if topic == "t1" && partition == 0 {
seenT1P0 = true
} else if topic == "t1" && partition == 1 {
seenT1P1 = true
} else if topic == "t2" && partition == 0 {
seenT2P0 = true
}
})
if !seenT1P0 {
t.Error("Didn't see t1p0")
}
if !seenT1P1 {
t.Error("Didn't see t1p1")
}
if !seenT2P0 {
t.Error("Didn't see t2p0")
}
if len(ps.dropPartition("t1", 1)) != 1 {
t.Error("Got wrong messages back from dropping partition")
}
if ps.bufferCount != 2 {
t.Error("Incorrect buffer count after dropping partition")
}
}
func TestProduceSetRequestBuilding(t *testing.T) {
parent, ps := makeProduceSet()
parent.conf.Producer.RequiredAcks = WaitForAll
parent.conf.Producer.Timeout = 10 * time.Second
msg := &ProducerMessage{
Topic: "t1",
Partition: 0,
Key: StringEncoder(TestMessage),
Value: StringEncoder(TestMessage),
}
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
}
msg.Partition = 1
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
}
msg.Topic = "t2"
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
}
req := ps.buildRequest()
if req.RequiredAcks != WaitForAll {
t.Error("RequiredAcks not set properly")
}
if req.Timeout != 10000 {
t.Error("Timeout not set properly")
}
if len(req.records) != 2 {
t.Error("Wrong number of topics in request")
}
}
func TestProduceSetCompressedRequestBuilding(t *testing.T) {
parent, ps := makeProduceSet()
parent.conf.Producer.RequiredAcks = WaitForAll
parent.conf.Producer.Timeout = 10 * time.Second
parent.conf.Producer.Compression = CompressionGZIP
parent.conf.Version = V0_10_0_0
msg := &ProducerMessage{
Topic: "t1",
Partition: 0,
Key: StringEncoder(TestMessage),
Value: StringEncoder(TestMessage),
Timestamp: time.Now(),
}
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
}
req := ps.buildRequest()
if req.Version != 2 {
t.Error("Wrong request version")
}
for _, msgBlock := range req.records["t1"][0].MsgSet.Messages {
msg := msgBlock.Msg
err := msg.decodeSet()
if err != nil {
t.Error("Failed to decode set from payload")
}
for i, compMsgBlock := range msg.Set.Messages {
compMsg := compMsgBlock.Msg
if compMsg.Version != 1 {
t.Error("Wrong compressed message version")
}
if compMsgBlock.Offset != int64(i) {
t.Errorf("Wrong relative inner offset, expected %d, got %d", i, compMsgBlock.Offset)
}
}
if msg.Version != 1 {
t.Error("Wrong compressed parent message version")
}
}
}
func TestProduceSetV3RequestBuilding(t *testing.T) {
parent, ps := makeProduceSet()
parent.conf.Producer.RequiredAcks = WaitForAll
parent.conf.Producer.Timeout = 10 * time.Second
parent.conf.Version = V0_11_0_0
now := time.Now()
msg := &ProducerMessage{
Topic: "t1",
Partition: 0,
Key: StringEncoder(TestMessage),
Value: StringEncoder(TestMessage),
Headers: []RecordHeader{
RecordHeader{
Key: []byte("header-1"),
Value: []byte("value-1"),
},
RecordHeader{
Key: []byte("header-2"),
Value: []byte("value-2"),
},
RecordHeader{
Key: []byte("header-3"),
Value: []byte("value-3"),
},
},
Timestamp: now,
}
for i := 0; i < 10; i++ {
safeAddMessage(t, ps, msg)
msg.Timestamp = msg.Timestamp.Add(time.Second)
}
req := ps.buildRequest()
if req.Version != 3 {
t.Error("Wrong request version")
}
batch := req.records["t1"][0].RecordBatch
if batch.FirstTimestamp != now {
t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp)
}
for i := 0; i < 10; i++ {
rec := batch.Records[i]
if rec.TimestampDelta != time.Duration(i)*time.Second {
t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta)
}
if rec.OffsetDelta != int64(i) {
t.Errorf("Wrong relative inner offset, expected %d, got %d", i, rec.OffsetDelta)
}
for j, h := range batch.Records[i].Headers {
exp := fmt.Sprintf("header-%d", j+1)
if string(h.Key) != exp {
t.Errorf("Wrong header key, expected %v, got %v", exp, h.Key)
}
exp = fmt.Sprintf("value-%d", j+1)
if string(h.Value) != exp {
t.Errorf("Wrong header value, expected %v, got %v", exp, h.Value)
}
}
}
}

View file

@@ -1,292 +0,0 @@
package sarama
import (
"reflect"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
)
var recordBatchTestCases = []struct {
name string
batch RecordBatch
encoded []byte
oldGoEncoded []byte // used in case of gzipped content for go versions prior to 1.8
}{
{
name: "empty record",
batch: RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(0, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{},
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 49, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
89, 95, 183, 221, // CRC
0, 0, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 0, // Number of Records
},
},
{
name: "control batch",
batch: RecordBatch{
Version: 2,
Control: true,
FirstTimestamp: time.Unix(0, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{},
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 49, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
81, 46, 67, 217, // CRC
0, 32, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 0, 0, 0, 0, 0, 0, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 0, // Number of Records
},
},
{
name: "uncompressed record",
batch: RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 70, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
84, 121, 97, 253, // CRC
0, 0, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
40, // Record Length
0, // Attributes
10, // Timestamp Delta
0, // Offset Delta
8, // Key Length
1, 2, 3, 4,
6, // Value Length
5, 6, 7,
2, // Number of Headers
6, // Header Key Length
8, 9, 10, // Header Key
4, // Header Value Length
11, 12, // Header Value
},
},
{
name: "gzipped record",
batch: RecordBatch{
Version: 2,
Codec: CompressionGZIP,
CompressionLevel: CompressionLevelDefault,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 94, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
159, 236, 182, 189, // CRC
0, 1, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
},
oldGoEncoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 94, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
0, 216, 14, 210, // CRC
0, 1, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 210, 96, 224, 98, 224, 96, 100, 98, 102, 97, 99, 101,
99, 103, 98, 227, 224, 228, 98, 225, 230, 1, 4, 0, 0, 255, 255, 173, 201, 88, 103, 21, 0, 0, 0,
},
},
{
name: "snappy compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionSnappy,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 72, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
21, 0, 159, 97, // CRC
0, 2, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
21, 80, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2, 6, 8, 9, 10, 4, 11, 12,
},
},
{
name: "lz4 compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionLZ4,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
Value: []byte{5, 6, 7},
Headers: []*RecordHeader{{
Key: []byte{8, 9, 10},
Value: []byte{11, 12},
}},
}},
recordsLen: 21,
},
encoded: []byte{
0, 0, 0, 0, 0, 0, 0, 0, // First Offset
0, 0, 0, 89, // Length
0, 0, 0, 0, // Partition Leader Epoch
2, // Version
169, 74, 119, 197, // CRC
0, 3, // Attributes
0, 0, 0, 0, // Last Offset Delta
0, 0, 1, 88, 141, 205, 89, 56, // First Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Max Timestamp
0, 0, 0, 0, 0, 0, 0, 0, // Producer ID
0, 0, // Producer Epoch
0, 0, 0, 0, // First Sequence
0, 0, 0, 1, // Number of Records
4, 34, 77, 24, 100, 112, 185, 21, 0, 0, 128, 40, 0, 10, 0, 8, 1, 2, 3, 4, 6, 5, 6, 7, 2,
6, 8, 9, 10, 4, 11, 12, 0, 0, 0, 0, 12, 59, 239, 146,
},
},
}
func isOldGo(t *testing.T) bool {
v := strings.Split(runtime.Version()[2:], ".")
if len(v) < 2 {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
maj, err := strconv.Atoi(v[0])
if err != nil {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
min, err := strconv.Atoi(v[1])
if err != nil {
t.Logf("Can't parse version: %s", runtime.Version())
return false
}
return maj < 1 || (maj == 1 && min < 8)
}
func TestRecordBatchEncoding(t *testing.T) {
for _, tc := range recordBatchTestCases {
if tc.oldGoEncoded != nil && isOldGo(t) {
testEncodable(t, tc.name, &tc.batch, tc.oldGoEncoded)
} else {
testEncodable(t, tc.name, &tc.batch, tc.encoded)
}
}
}
func TestRecordBatchDecoding(t *testing.T) {
for _, tc := range recordBatchTestCases {
batch := RecordBatch{}
testDecodable(t, tc.name, &batch, tc.encoded)
for _, r := range batch.Records {
r.length = varintLengthField{}
}
for _, r := range tc.batch.Records {
r.length = varintLengthField{}
}
// The compression level is not restored on decoding. It is not needed
// anyway. We only set it here to ensure that the comparison succeeds.
batch.CompressionLevel = tc.batch.CompressionLevel
if !reflect.DeepEqual(batch, tc.batch) {
t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch))
}
}
}

View file

@@ -1,143 +0,0 @@
package sarama
import (
"bytes"
"reflect"
"testing"
)
func TestLegacyRecords(t *testing.T) {
set := &MessageSet{
Messages: []*MessageBlock{
{
Msg: &Message{
Version: 1,
},
},
},
}
r := newLegacyRecords(set)
exp, err := encode(set, nil)
if err != nil {
t.Fatal(err)
}
buf, err := encode(&r, nil)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, exp) {
t.Errorf("Wrong encoding for legacy records, wanted %v, got %v", exp, buf)
}
set = &MessageSet{}
r = Records{}
err = decode(exp, set)
if err != nil {
t.Fatal(err)
}
err = decode(buf, &r)
if err != nil {
t.Fatal(err)
}
if r.recordsType != legacyRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords)
}
if !reflect.DeepEqual(set, r.MsgSet) {
t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.MsgSet)
}
n, err := r.numRecords()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("Wrong number of records, wanted 1, got %d", n)
}
p, err := r.isPartial()
if err != nil {
t.Fatal(err)
}
if p {
t.Errorf("MessageSet shouldn't have a partial trailing message")
}
c, err := r.isControl()
if err != nil {
t.Fatal(err)
}
if c {
t.Errorf("MessageSet can't be a control batch")
}
}
func TestDefaultRecords(t *testing.T) {
batch := &RecordBatch{
Version: 2,
Records: []*Record{
{
Value: []byte{1},
},
},
}
r := newDefaultRecords(batch)
exp, err := encode(batch, nil)
if err != nil {
t.Fatal(err)
}
buf, err := encode(&r, nil)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, exp) {
t.Errorf("Wrong encoding for default records, wanted %v, got %v", exp, buf)
}
batch = &RecordBatch{}
r = Records{}
err = decode(exp, batch)
if err != nil {
t.Fatal(err)
}
err = decode(buf, &r)
if err != nil {
t.Fatal(err)
}
if r.recordsType != defaultRecords {
t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords)
}
if !reflect.DeepEqual(batch, r.RecordBatch) {
t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.RecordBatch)
}
n, err := r.numRecords()
if err != nil {
t.Fatal(err)
}
if n != 1 {
t.Errorf("Wrong number of records, wanted 1, got %d", n)
}
p, err := r.isPartial()
if err != nil {
t.Fatal(err)
}
if p {
t.Errorf("RecordBatch shouldn't have a partial trailing record")
}
c, err := r.isControl()
if err != nil {
t.Fatal(err)
}
if c {
t.Errorf("RecordBatch shouldn't be a control batch")
}
}

View file

@@ -1,105 +0,0 @@
package sarama
import (
"bytes"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
type testRequestBody struct {
}
func (s *testRequestBody) key() int16 {
return 0x666
}
func (s *testRequestBody) version() int16 {
return 0xD2
}
func (s *testRequestBody) encode(pe packetEncoder) error {
return pe.putString("abc")
}
// These helpers are not specific to request tests; they are shared functions for testing
// structures that implement the encoder or decoder interfaces and needed somewhere to live
func testEncodable(t *testing.T, name string, in encoder, expect []byte) {
packet, err := encode(in, nil)
if err != nil {
t.Error(err)
} else if !bytes.Equal(packet, expect) {
t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect)
}
}
func testDecodable(t *testing.T, name string, out decoder, in []byte) {
err := decode(in, out)
if err != nil {
t.Error("Decoding", name, "failed:", err)
}
}
func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) {
err := versionedDecode(in, out, version)
if err != nil {
t.Error("Decoding", name, "version", version, "failed:", err)
}
}
func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
if !rb.requiredVersion().IsAtLeast(MinVersion) {
t.Errorf("Request %s has invalid required version", name)
}
packet := testRequestEncode(t, name, rb, expected)
testRequestDecode(t, name, rb, packet)
}
func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte {
req := &request{correlationID: 123, clientID: "foo", body: rb}
packet, err := encode(req, nil)
headerSize := 14 + len("foo")
if err != nil {
t.Error(err)
} else if !bytes.Equal(packet[headerSize:], expected) {
t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected)
}
return packet
}
func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte) {
decoded, n, err := decodeRequest(bytes.NewReader(packet))
if err != nil {
t.Error("Failed to decode request", err)
} else if decoded.correlationID != 123 || decoded.clientID != "foo" {
t.Errorf("Decoded header %q is not valid: %+v", name, decoded)
} else if !reflect.DeepEqual(rb, decoded.body) {
t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body))
} else if n != len(packet) {
t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet))
} else if rb.version() != decoded.body.version() {
t.Errorf("Decoded request %q version: %d does not match the encoded one: %d\n", name, decoded.body.version(), rb.version())
}
}
func testResponse(t *testing.T, name string, res protocolBody, expected []byte) {
encoded, err := encode(res, nil)
if err != nil {
t.Error(err)
} else if expected != nil && !bytes.Equal(encoded, expected) {
t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected)
}
decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder)
if err := versionedDecode(encoded, decoded, res.version()); err != nil {
t.Error("Decoding", name, "failed:", err)
}
if !reflect.DeepEqual(decoded, res) {
t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded)
}
}
func nullString(s string) *string { return &s }

View file

@@ -1,21 +0,0 @@
package sarama
import "testing"
var (
responseHeaderBytes = []byte{
0x00, 0x00, 0x0f, 0x00,
0x0a, 0xbb, 0xcc, 0xff}
)
func TestResponseHeader(t *testing.T) {
header := responseHeader{}
testDecodable(t, "response header", &header, responseHeaderBytes)
if header.length != 0xf00 {
t.Error("Decoding header length failed, got", header.length)
}
if header.correlationID != 0x0abbccff {
t.Error("Decoding header correlation id failed, got", header.correlationID)
}
}

View file

@@ -1,17 +0,0 @@
package sarama
import "testing"
var (
baseSaslRequest = []byte{
0, 3, 'f', 'o', 'o', // Mechanism
}
)
func TestSaslHandshakeRequest(t *testing.T) {
request := new(SaslHandshakeRequest)
request.Mechanism = "foo"
testRequest(t, "basic", request, baseSaslRequest)
}

Some files were not shown because too many files have changed in this diff.