Add go mod support (#2503)

* Remove vendor and go-dep

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

* Add go.mod

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>

* Update Makefile and .travis.yml

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang 2019-03-03 11:56:26 -08:00 committed by GitHub
parent 39d94835ee
commit 9d39ea51a7
2198 changed files with 223 additions and 891982 deletions
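For context, a conversion like this one is normally driven by the Go toolchain itself. The commands below are a minimal sketch of a dep-to-modules migration, not taken from this commit; the only assumptions are the module path and Go >= 1.11 running in module mode.

# Hypothetical migration steps (illustrative only, not part of this commit).
export GO111MODULE=on
# 'go mod init' reads an existing Gopkg.lock when present and seeds go.mod
# with equivalent requirements.
go mod init github.com/coredns/coredns
# 'go mod tidy' adds missing requirements, drops unused ones, and fills go.sum.
go mod tidy
# With modules in charge, the dep metadata and the vendor tree can be removed.
git rm -r -q Gopkg.toml Gopkg.lock vendor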

.travis.yml

@@ -38,7 +38,6 @@ before_install:
 before_script:
   - docker run -d --net=host --name=etcd quay.io/coreos/etcd:v$ETCD_VERSION
-  - make godeps
 
 script:
   - make TEST_TYPE=$TEST_TYPE travis

Gopkg.lock (generated, 860 lines deleted)

@@ -1,860 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57"
name = "cloud.google.com/go"
packages = ["compute/metadata"]
pruneopts = "NUT"
revision = "97efc2c9ffd9fe8ef47f7f3203dc60bbca547374"
version = "v0.28.0"
[[projects]]
digest = "1:f7a3877d7116adfcdcd641f9f6ad18fc2126bd5c080d8854cc3b395a01e0b7f3"
name = "github.com/DataDog/dd-trace-go"
packages = [
"opentracing",
"tracer",
"tracer/ext",
]
pruneopts = "NUT"
revision = "27617015d45e6cd550b9a7ac7715c37cc2f7d020"
version = "v0.6.1"
[[projects]]
digest = "1:9c883d2e7fd391fbc7db7536863cc4cd181a97646b4659e9a7e7aa583c411885"
name = "github.com/Shopify/sarama"
packages = ["."]
pruneopts = "NUT"
revision = "35324cf48e33d8260e1c7c18854465a904ade249"
version = "v1.17.0"
[[projects]]
digest = "1:b39cf81d5f440b9c0757a25058432d33af867e5201109bf53621356d9dab4b73"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
pruneopts = "NUT"
revision = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33"
version = "v0.12.0"
[[projects]]
digest = "1:7943d65eade11f12eda6d0c0e681197e26cad6bdc0105a0a28c3c5c761c00589"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/route53",
"service/route53/route53iface",
"service/sts",
]
pruneopts = "NUT"
revision = "852052a10992d92f68b9a60862a3312292524903"
version = "v1.14.17"
[[projects]]
digest = "1:22db66c6af48fe0e93e971d796cad565ee9e0039646bd59bc8e29b7a7bd2d24e"
name = "github.com/coreos/etcd"
packages = [
"auth/authpb",
"clientv3",
"etcdserver/api/v3rpc/rpctypes",
"etcdserver/etcdserverpb",
"mvcc/mvccpb",
"pkg/types",
]
pruneopts = "NUT"
revision = "2cf9e51d2a78003b164c2998886158e60ded1cbb"
version = "v3.3.11"
[[projects]]
digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "NUT"
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:1276a1b387cedc23233635146ead785e5438c82526a2ccec4d714b5a0e7883e2"
name = "github.com/dnstap/golang-dnstap"
packages = ["."]
pruneopts = "NUT"
revision = "2cf77a2b5e11ac8d0ba3892772ac8e1f7b528344"
[[projects]]
digest = "1:08143362be979b087c2c1bae5dde986e988d3d5d4dc661727cbe436411b3f33a"
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
pruneopts = "NUT"
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:0b70d299db64766a4ee7128f0f94944f2356ca9d3499c79123dbc3a1d2ba803e"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
pruneopts = "NUT"
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
digest = "1:0d36a2b325b9e75f8057f7f9fbe778d348d70ba652cb9335485b69d1a5c4e038"
name = "github.com/eapache/queue"
packages = ["."]
pruneopts = "NUT"
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
name = "github.com/evanphx/json-patch"
packages = ["."]
pruneopts = "NUT"
revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
version = "v4.1.0"
[[projects]]
branch = "master"
digest = "1:5bb547db6b473fe900e68ffbcaf1626d1c7825374cba9b5dd52d5209fd89cca1"
name = "github.com/farsightsec/golang-framestream"
packages = ["."]
pruneopts = "NUT"
revision = "c06a5734334d9629b3db143d74b47eb94ea68612"
[[projects]]
digest = "1:30b09d75e7e16119e85020374df10ea06f263c9bdff72afdc1522838b9609375"
name = "github.com/go-ini/ini"
packages = ["."]
pruneopts = "NUT"
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
digest = "1:341a7df38da99fe91ed40e4008c13cc5d02dcc98ed1a094360cb7d5df26d6d26"
name = "github.com/go-logfmt/logfmt"
packages = ["."]
pruneopts = "NUT"
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
digest = "1:acba7f3d06725b78c6999c2ba6c40b80539c10346fe37b4681072f1666c87d0c"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
]
pruneopts = "NUT"
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
digest = "1:03e14cff610a8a58b774e36bd337fa979482be86aab01be81fb8bbd6d0f07fc8"
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "NUT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "NUT"
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
branch = "master"
digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "NUT"
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
branch = "master"
digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "NUT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions",
]
pruneopts = "NUT"
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:8ccbd0e8ef8b6e59b331991fcb5477fb5e6c51b7d1be09789d7dc0a33e95bf6a"
name = "github.com/gophercloud/gophercloud"
packages = [
".",
"openstack",
"openstack/identity/v2/tenants",
"openstack/identity/v2/tokens",
"openstack/identity/v3/tokens",
"openstack/utils",
"pagination",
]
pruneopts = "NUT"
revision = "bfc006765209a570e9a783bd7e4372e24fb72781"
[[projects]]
branch = "master"
digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache",
]
pruneopts = "NUT"
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
[[projects]]
branch = "master"
digest = "1:7b760aa6dbe426378f54934270dde1b176fda379111da2154748f030fffe4d3f"
name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"]
pruneopts = "NUT"
revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"
[[projects]]
branch = "master"
digest = "1:13e2fa5735a82a5fb044f290cfd0dba633d1c5e516b27da0509e0dbb3515a18e"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru",
]
pruneopts = "NUT"
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
digest = "1:65300ccc4bcb38b107b868155c303312978981e56bca707c81efec57575b5e06"
name = "github.com/imdario/mergo"
packages = ["."]
pruneopts = "NUT"
revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
version = "v0.3.5"
[[projects]]
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
name = "github.com/jmespath/go-jmespath"
packages = ["."]
pruneopts = "NUT"
revision = "0b12d6b5"
[[projects]]
digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "NUT"
revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
version = "1.1.4"
[[projects]]
branch = "master"
digest = "1:f91bb67d051a94cf6ab2cf07eee0550432afc31655838d42cdec258db4b348f0"
name = "github.com/kr/logfmt"
packages = ["."]
pruneopts = "NUT"
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
pruneopts = "NUT"
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
name = "github.com/modern-go/concurrent"
packages = ["."]
pruneopts = "NUT"
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
name = "github.com/modern-go/reflect2"
packages = ["."]
pruneopts = "NUT"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[[projects]]
branch = "master"
digest = "1:b4f46325dbc2a7489ba33b4bd3cce542a1721611e857c1cb0b0c57775b2ad130"
name = "github.com/opentracing-contrib/go-observer"
packages = ["."]
pruneopts = "NUT"
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
[[projects]]
digest = "1:6e36c3eab25478d363e7eb669f253d9f9a620e640b0e1d86d3e909cbb1a56ab6"
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log",
"mocktracer",
]
pruneopts = "NUT"
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
digest = "1:bf3b3b697c848612d4c87121b189c37f7a7a7b6b7e1ae3bbfbe12574cbebec3d"
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [
".",
"flag",
"thrift/gen-go/scribe",
"thrift/gen-go/zipkincore",
"types",
"wire",
]
pruneopts = "NUT"
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
version = "v0.3.4"
[[projects]]
branch = "master"
digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
pruneopts = "NUT"
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
name = "github.com/peterbourgon/diskv"
packages = ["."]
pruneopts = "NUT"
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:bf8b95442fa2d448bde39997d6fde56de40344b422d93cea4deead8de699e190"
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32",
]
pruneopts = "NUT"
revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00"
version = "v2.0.3"
[[projects]]
branch = "master"
digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b"
name = "github.com/prometheus/client_model"
packages = ["go"]
pruneopts = "NUT"
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
digest = "1:768b555b86742de2f28beb37f1dedce9a75f91f871d75b5717c96399c1a78c08"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
]
pruneopts = "NUT"
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
digest = "1:7c522337040d4ec9a136cd9d64fe4677ee1d3eae4a7f8831c2108f9bec43fa48"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
pruneopts = "NUT"
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
digest = "1:a49fa08481484b6a37ee5a641a90d1efb6818972a5de88617e3fe31d240dabfd"
name = "github.com/ugorji/go"
packages = ["codec"]
pruneopts = "NUT"
revision = "9c7f9b7a2bc3a520f7c7b30b34b7f85f47fe27b6"
[[projects]]
branch = "master"
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
[[projects]]
branch = "master"
digest = "1:915e6387683239c2d703df757841d080b2bb08b2a2ddd15592533778acbcbe34"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "NUT"
revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9"
[[projects]]
branch = "master"
digest = "1:bc2b221d465bb28ce46e8d472ecdc424b9a9b541bd61d8c311c5f29c8dd75b1b"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt",
]
pruneopts = "NUT"
revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
[[projects]]
branch = "master"
digest = "1:baa3044d8a33b3cd690c16fd555c5a543f980206cd1a696f7902307f57f911ec"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "NUT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4"
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
digest = "1:e2da54c7866453ac5831c61c7ec5d887f39328cac088c806553303bff4048e6f"
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch",
]
pruneopts = "NUT"
revision = "ae0ab99deb4dc413a2b4bd6c8bdd0eb67f1e4d06"
version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:601e63e7d4577f907118bec825902505291918859d223bce015539e79f1160e3"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
pruneopts = "NUT"
revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"
[[projects]]
digest = "1:6bf78c43ce1ac3523e4923465454a82135eb9b3a4eb9356e1e75a36c416aeaa8"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclog",
"health/grpc_health_v1",
"internal",
"internal/backoff",
"internal/channelz",
"internal/grpcrand",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport",
]
pruneopts = "NUT"
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
version = "v1.13.0"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
name = "gopkg.in/inf.v0"
packages = ["."]
pruneopts = "NUT"
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
digest = "1:7c95b35057a0ff2e19f707173cc1a947fa43a6eb5c4d300d196ece0334046082"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "NUT"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"auditregistration/v1alpha1",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"autoscaling/v2beta2",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"coordination/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"scheduling/v1beta1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1",
]
pruneopts = "NUT"
revision = "89a74a8d264df0e993299876a8cde88379b940ee"
[[projects]]
digest = "1:faec2ad757dee14d4e0416d199aeda57f2a9ab1d3bebb5aa63f18e26f7b6ade4"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
[[projects]]
digest = "1:0035d9ccabe66fa3b7aba7c6c9b656e9910ed462f38992db445e194e8cb27be9"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/auditregistration/v1alpha1",
"kubernetes/typed/auditregistration/v1alpha1/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/autoscaling/v2beta2",
"kubernetes/typed/autoscaling/v2beta2/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/coordination/v1beta1",
"kubernetes/typed/coordination/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
"kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/scheduling/v1beta1",
"kubernetes/typed/scheduling/v1beta1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"plugin/pkg/client/auth/openstack",
"rest",
"rest/watch",
"testing",
"third_party/forked/golang/template",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/connrotation",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/jsonpath",
"util/retry",
]
pruneopts = "NUT"
revision = "e64494209f554a6723674bd494d69445fb76a1d4"
version = "v10.0.0"
[[projects]]
digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "8139d8cb77af419532b33dfa7dd09fbc5f1d344f"
[[projects]]
branch = "master"
digest = "1:a2c842a1e0aed96fd732b535514556323a6f5edfded3b63e5e0ab1bce188aa54"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "NUT"
revision = "9dfdf9be683f61f82cda12362c44c784e0778b56"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/DataDog/dd-trace-go/opentracing",
"github.com/aws/aws-sdk-go/aws",
"github.com/aws/aws-sdk-go/aws/credentials",
"github.com/aws/aws-sdk-go/aws/request",
"github.com/aws/aws-sdk-go/aws/session",
"github.com/aws/aws-sdk-go/service/route53",
"github.com/aws/aws-sdk-go/service/route53/route53iface",
"github.com/coreos/etcd/clientv3",
"github.com/coreos/etcd/mvcc/mvccpb",
"github.com/dnstap/golang-dnstap",
"github.com/farsightsec/golang-framestream",
"github.com/golang/protobuf/proto",
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc",
"github.com/matttproud/golang_protobuf_extensions/pbutil",
"github.com/opentracing/opentracing-go",
"github.com/opentracing/opentracing-go/mocktracer",
"github.com/openzipkin/zipkin-go-opentracing",
"github.com/prometheus/client_model/go",
"github.com/prometheus/common/expfmt",
"golang.org/x/sys/unix",
"google.golang.org/grpc",
"google.golang.org/grpc/credentials",
"google.golang.org/grpc/grpclog",
"google.golang.org/grpc/peer",
"k8s.io/api/core/v1",
"k8s.io/apimachinery/pkg/apis/meta/v1",
"k8s.io/apimachinery/pkg/labels",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/runtime/schema",
"k8s.io/apimachinery/pkg/types",
"k8s.io/apimachinery/pkg/watch",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/plugin/pkg/client/auth/oidc",
"k8s.io/client-go/plugin/pkg/client/auth/openstack",
"k8s.io/client-go/rest",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
"k8s.io/client-go/tools/clientcmd/api",
]
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (deleted)

@@ -1,35 +0,0 @@
ignored = [
"github.com/mholt/caddy",
"github.com/mholt/caddy/caddyfile",
"github.com/mholt/caddy/startupshutdown",
"github.com/mholt/caddy/onevent",
"github.com/miekg/dns",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
]
[prune]
go-tests = true
non-go = true
unused-packages = true
# client-go v10.0.0 uses apimachinery 2b1284ed4c93a43499e781493253e2ac5959c4fd
# and api 89a74a8d264df0e993299876a8cde88379b940ee,
# and introduced klog 8139d8cb77af419532b33dfa7dd09fbc5f1d344f
# and yaml fd68e9863619f6ec2fdd8625fe1f02e7c877e480 (see Godep.json).
# go dep is unable to match Godep.json automatically so have to specify here.
[[constraint]]
name = "k8s.io/client-go"
version = "v10.0.0"
[[override]]
name = "k8s.io/apimachinery"
revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
[[override]]
name = "k8s.io/api"
revision = "89a74a8d264df0e993299876a8cde88379b940ee"
[[override]]
name = "k8s.io/klog"
revision = "8139d8cb77af419532b33dfa7dd09fbc5f1d344f"
[[override]]
name = "sigs.k8s.io/yaml"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"

Makefile

@@ -2,7 +2,7 @@
 GITCOMMIT:=$(shell git describe --dirty --always)
 BINARY:=coredns
 SYSTEM:=
-CHECKS:=check godeps
+CHECKS:=check
 BUILDOPTS:=-v
 GOPATH?=$(HOME)/go
 PRESUBMIT:=core coremain plugin test request
@@ -14,45 +14,29 @@ all: coredns
 .PHONY: coredns
 coredns: $(CHECKS)
-	CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY)
+	GO111MODULE=on CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY)
 
 .PHONY: check
-check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go godeps
-
-.PHONY: godeps
-godeps:
-	@ # Not vendoring these, so external plugins compile, avoiding:
-	@ # cannot use c (type *"github.com/mholt/caddy".Controller) as type
-	@ # *"github.com/coredns/coredns/vendor/github.com/mholt/caddy".Controller like errors.
-	(cd $(GOPATH)/src/github.com/mholt/caddy 2>/dev/null && git checkout -q master 2>/dev/null || true)
-	(cd $(GOPATH)/src/github.com/miekg/dns 2>/dev/null && git checkout -q master 2>/dev/null || true)
-	(cd $(GOPATH)/src/github.com/prometheus/client_golang 2>/dev/null && git checkout -q master 2>/dev/null || true)
-	go get -u github.com/mholt/caddy
-	go get -u github.com/miekg/dns
-	go get -u github.com/prometheus/client_golang/prometheus/promhttp
-	go get -u github.com/prometheus/client_golang/prometheus
-	(cd $(GOPATH)/src/github.com/mholt/caddy && git checkout -q v0.11.4)
-	(cd $(GOPATH)/src/github.com/miekg/dns && git checkout -q v1.1.4)
-	(cd $(GOPATH)/src/github.com/prometheus/client_golang && git checkout -q v0.9.1)
+check: presubmit core/plugin/zplugin.go core/dnsserver/zdirectives.go
 
 .PHONY: travis
 travis:
 ifeq ($(TEST_TYPE),core)
-	( cd request ; go test -v -tags 'etcd' -race ./... )
-	( cd core ; go test -v -tags 'etcd' -race ./... )
-	( cd coremain ; go test -v -tags 'etcd' -race ./... )
+	( cd request ; GO111MODULE=on go test -v -tags 'etcd' -race ./... )
+	( cd core ; GO111MODULE=on go test -v -tags 'etcd' -race ./... )
+	( cd coremain ; GO111MODULE=on go test -v -tags 'etcd' -race ./... )
 endif
 ifeq ($(TEST_TYPE),integration)
-	( cd test ; go test -v -tags 'etcd' -race ./... )
+	( cd test ; GO111MODULE=on go test -v -tags 'etcd' -race ./... )
 endif
 ifeq ($(TEST_TYPE),plugin)
-	( cd plugin ; go test -v -tags 'etcd' -race ./... )
+	( cd plugin ; GO111MODULE=on go test -v -tags 'etcd' -race ./... )
 endif
 ifeq ($(TEST_TYPE),coverage)
 	for d in `go list ./... | grep -v vendor`; do \
 		t=$$(date +%s); \
-		go test -i -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
-		go test -v -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
+		GO111MODULE=on go test -i -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
+		GO111MODULE=on go test -v -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
 		echo "Coverage test $$d took $$(($$(date +%s)-t)) seconds"; \
 		if [ -f cover.out ]; then \
 			cat cover.out >> coverage.txt; \
@@ -62,11 +46,11 @@ ifeq ($(TEST_TYPE),coverage)
 endif
 
 core/plugin/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg
-	go generate coredns.go
+	GO111MODULE=on go generate coredns.go
 
 .PHONY: gen
 gen:
-	go generate coredns.go
+	GO111MODULE=on go generate coredns.go
 
 .PHONY: pb
 pb:
@@ -79,12 +63,5 @@ presubmit:
 .PHONY: clean
 clean:
-	go clean
+	GO111MODULE=on go clean
 	rm -f coredns
-
-.PHONY: dep-ensure
-dep-ensure:
-	dep version || go get -u github.com/golang/dep/cmd/dep
-	dep ensure -v
-	dep prune -v
-	find vendor -name '*_test.go' -delete
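The deleted godeps target existed because caddy, miekg/dns, and prometheus/client_golang were deliberately kept out of vendor/: a vendored copy of github.com/mholt/caddy is a distinct package path, so an external plugin compiled against the GOPATH copy would hit exactly the "cannot use c (type *caddy.Controller) ..." mismatch quoted in the removed comment. Modules give every build one flat dependency graph, so the same pins become plain requirements in go.mod. A hedged sketch of the equivalent pinning (hypothetical commands, not part of this commit):

# Pinning under modules replaces the old GOPATH checkout dance. These mirror
# the versions godeps used to check out (client_golang moves from v0.9.1 to
# the v0.9.2 now required in go.mod).
GO111MODULE=on go get github.com/mholt/caddy@v0.11.4
GO111MODULE=on go get github.com/miekg/dns@v1.1.4
GO111MODULE=on go get github.com/prometheus/client_golang@v0.9.2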

go.mod (new file, 71 lines)

@@ -0,0 +1,71 @@
module github.com/coredns/coredns
go 1.12
require (
cloud.google.com/go v0.28.0
github.com/DataDog/dd-trace-go v0.6.1
github.com/Shopify/sarama v1.17.0
github.com/apache/thrift v0.12.0
github.com/aws/aws-sdk-go v1.14.17
github.com/coreos/etcd v3.3.11+incompatible
github.com/davecgh/go-spew v1.1.0
github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11
github.com/eapache/go-resiliency v1.1.0
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934
github.com/eapache/queue v1.1.0
github.com/evanphx/json-patch v4.1.0+incompatible
github.com/farsightsec/golang-framestream v0.0.0-20180124174429-c06a5734334d
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/go-ini/ini v1.37.0
github.com/go-logfmt/logfmt v0.3.0
github.com/gogo/protobuf v1.0.0
github.com/golang/protobuf v1.2.0
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf
github.com/google/uuid v1.1.1 // indirect
github.com/googleapis/gnostic v0.2.0
github.com/gophercloud/gophercloud v0.0.0-20180928224355-bfc006765209
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47
github.com/imdario/mergo v0.3.5
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515
github.com/matttproud/golang_protobuf_extensions v1.0.1
github.com/mholt/caddy v0.11.4
github.com/miekg/dns v1.1.4
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492
github.com/opentracing/opentracing-go v1.0.2
github.com/openzipkin/zipkin-go-opentracing v0.3.4
github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c
github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/pierrec/lz4 v2.0.3+incompatible
github.com/prometheus/client_golang v0.9.2
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165
github.com/spf13/pflag v1.0.1
github.com/ugorji/go v0.0.0-20161130061742-9c7f9b7a2bc3
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sys v0.0.0-20180627142611-7138fd3d9dc8
golang.org/x/text v0.3.0
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/appengine v1.2.0
google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d
google.golang.org/grpc v1.13.0
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v2 v2.2.1
k8s.io/api v0.0.0-20181204000039-89a74a8d264d
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
k8s.io/client-go v10.0.0+incompatible
k8s.io/klog v0.0.0-20181108234604-8139d8cb77af
k8s.io/kube-openapi v0.0.0-20180928202339-9dfdf9be683f
sigs.k8s.io/yaml v1.1.0
)
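Most requirements above carry real tags, but everything dep pinned to a bare revision surfaces here as a pseudo-version of the form v0.0.0-<UTC commit timestamp>-<12-char commit prefix>; the apimachinery override from Gopkg.toml becomes k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93, for example. The +incompatible suffix (e.g. k8s.io/client-go v10.0.0+incompatible) marks a v2-or-later tag on a repository whose module path has no /vN suffix. A pseudo-version can also be derived by hand; a sketch, assuming it is run inside the dependency's clone:

# Hypothetical helper: derive the pseudo-version of the checked-out commit,
# mirroring what the go tool computes itself.
TS=$(TZ=UTC git show -s --date=format-local:'%Y%m%d%H%M%S' --format=%cd HEAD)
REV=$(git rev-parse --short=12 HEAD)
echo "v0.0.0-$TS-$REV"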

go.sum (new file, 139 lines)

@@ -0,0 +1,139 @@
cloud.google.com/go v0.28.0 h1:KZ/88LWSw8NxMkjdQyX7LQSGR9PkHr4PaVuNm8zgFq0=
cloud.google.com/go v0.28.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/DataDog/dd-trace-go v0.6.1 h1:nsZ2lohbSw1CKtfNRu3wPh1jFirv6XSz8vqNpuIYWbM=
github.com/DataDog/dd-trace-go v0.6.1/go.mod h1:SmQTTcC37XMyEm75HV0AWiZIYxDiaNhRi49zorIpW+o=
github.com/Shopify/sarama v1.17.0 h1:Y2/FBwElFVwt7aLKL3fDG6hh+rrlywR6uLgTgKObwTc=
github.com/Shopify/sarama v1.17.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/aws/aws-sdk-go v1.14.17 h1:T6YqsDgkg5WA00ZOpPU701dNhlpcS/HN1cW0VpvG0Mg=
github.com/aws/aws-sdk-go v1.14.17/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/coreos/etcd v3.3.11+incompatible h1:0gCnqKsq7XxMi69JsnbmMc1o+RJH3XH64sV9aiTTYko=
github.com/coreos/etcd v3.3.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4=
github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14=
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/farsightsec/golang-framestream v0.0.0-20180124174429-c06a5734334d h1:gD/JsQfPlh2UK5vSWTKt9e9AUpD/qdghdtfPZ/NyCCM=
github.com/farsightsec/golang-framestream v0.0.0-20180124174429-c06a5734334d/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/go-ini/ini v1.37.0 h1:/FpMfveJbc7ExTTDgT5nL9Vw+aZdst/c2dOxC931U+M=
github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a h1:ZJu5NB1Bk5ms4vw0Xu4i+jD32SE9jQXyfnOvwhHqlT0=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.0.0-20180928224355-bfc006765209 h1:Igqoa3ygx1DDGnGsbkKcXwwZt0jzCAbWsFfzIFYLP34=
github.com/gophercloud/gophercloud v0.0.0-20180928224355-bfc006765209/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/caddy v0.11.4 h1:he7Ej5Jf9CXjETtfQQBr5KJ1b5ZWdPaBOJjiQs6LAIk=
github.com/mholt/caddy v0.11.4/go.mod h1:Wb1PlT4DAYSqOEd03MsqkdkXnTxA8v9pKjdpxbqM1kY=
github.com/miekg/dns v1.1.4 h1:rCMZsU2ScVSYcAsOXgmC6+AKOK+6pmQTOcw03nfwYV0=
github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g=
github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE=
github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.3+incompatible h1:h0ipQUMRrnr+/HHhxhceftyXk4QcZsmxSNliSG75Bi0=
github.com/pierrec/lz4 v2.0.3+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk=
github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/ugorji/go v0.0.0-20161130061742-9c7f9b7a2bc3 h1:KfqgRfYvGRyKOQUx8GphSC8D6vVoBndzco3rESP8I4k=
github.com/ugorji/go v0.0.0-20161130061742-9c7f9b7a2bc3/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8 h1:h7zdf0RiEvWbYBKIx4b+q41xoUVnMmvsGZnIVE5syG8=
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180629035331-4cb1c02c05b0/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180627142611-7138fd3d9dc8 h1:RI4LLZfYDSosZMJ7FzhhEQbwo7tA8Bp9Vhml1PukQsg=
golang.org/x/sys v0.0.0-20180627142611-7138fd3d9dc8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d h1:6X4PYh39/Pjz7al8CnTTsk+jp3fG2KPpusrnwqzAW+M=
google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/api v0.0.0-20181204000039-89a74a8d264d h1:HQoGWsWUe/FmRcX9BU440AAMnzBFEf+DBo4nbkQlNzs=
k8s.io/api v0.0.0-20181204000039-89a74a8d264d/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93 h1:tT6oQBi0qwLbbZSfDkdIsb23EwaLY85hoAV4SpXfdao=
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34=
k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/klog v0.0.0-20181108234604-8139d8cb77af h1:s6rm8OxBbyDNSRkpyAd5OL4icUdBICVw9+mFADa+t5E=
k8s.io/klog v0.0.0-20181108234604-8139d8cb77af/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20180928202339-9dfdf9be683f/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
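Each dependency contributes up to two lines here: an h1: hash over the extracted module tree, and a /go.mod variant covering only that module's go.mod file (which is why some modules appear with just the latter). The toolchain checks every download against these hashes. A short usage sketch with standard go commands:

# Verify that cached module downloads still match their recorded hashes.
GO111MODULE=on go mod verify
# If verification fails, wipe the cache and re-download from scratch.
GO111MODULE=on go clean -modcache
GO111MODULE=on go mod download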

vendor/cloud.google.com/go/AUTHORS (generated, vendored, deleted)

@@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/CONTRIBUTORS (generated, vendored, deleted)

@@ -1,40 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/LICENSE (generated, vendored, deleted)

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,503 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// we know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance name string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
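For orientation, a minimal sketch of how a consumer would typically use this vendored package; the lookups shown are illustrative and not taken from this repository:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE caches its probe result via sync.Once, so repeated calls are cheap.
	if !metadata.OnGCE() {
		log.Println("not on GCE; skipping metadata lookups")
		return
	}
	// ProjectID is cached after the first successful fetch (see cachedValue).
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", proj)
}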

View file

@ -1,24 +0,0 @@
Copyright (c) 2016, Datadog <info@datadoghq.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Datadog nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,2 +0,0 @@
Component,Origin,License,Copyright
import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors

View file

@ -1,58 +0,0 @@
package opentracing
import (
"os"
"path/filepath"
)
// Configuration struct configures the Datadog tracer. Please use the NewConfiguration
// constructor to begin.
type Configuration struct {
// Enabled, when false, returns a no-op implementation of the Tracer.
Enabled bool
// Debug, when true, writes details to logs.
Debug bool
// ServiceName specifies the name of this application.
ServiceName string
// SampleRate sets the Tracer sample rate (ext/priority.go).
SampleRate float64
// AgentHostname specifies the hostname of the agent where the traces
// are sent to.
AgentHostname string
// AgentPort specifies the port that the agent is listening on.
AgentPort string
// GlobalTags holds a set of tags that will be automatically applied to
// all spans.
GlobalTags map[string]interface{}
// TextMapPropagator is an injector used for Context propagation.
TextMapPropagator Propagator
}
// NewConfiguration creates a `Configuration` object with default values.
func NewConfiguration() *Configuration {
// default service name is the Go binary name
binaryName := filepath.Base(os.Args[0])
// Configuration struct with default values
return &Configuration{
Enabled: true,
Debug: false,
ServiceName: binaryName,
SampleRate: 1,
AgentHostname: "localhost",
AgentPort: "8126",
GlobalTags: make(map[string]interface{}),
TextMapPropagator: NewTextMapPropagator("", "", ""),
}
}
type noopCloser struct{}
func (c *noopCloser) Close() error { return nil }

View file

@ -1,46 +0,0 @@
package opentracing
// SpanContext represents Span state that must propagate to descendant Spans
// and across process boundaries.
type SpanContext struct {
traceID uint64
spanID uint64
parentID uint64
sampled bool
span *Span
baggage map[string]string
}
// ForeachBaggageItem grants access to all baggage items stored in the
// SpanContext
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
for k, v := range c.baggage {
if !handler(k, v) {
break
}
}
}
// WithBaggageItem returns an entirely new SpanContext with the
// given key:value baggage pair set.
func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
var newBaggage map[string]string
if c.baggage == nil {
newBaggage = map[string]string{key: val}
} else {
newBaggage = make(map[string]string, len(c.baggage)+1)
for k, v := range c.baggage {
newBaggage[k] = v
}
newBaggage[key] = val
}
// Use positional parameters so the compiler will help catch new fields.
return SpanContext{
traceID: c.traceID,
spanID: c.spanID,
parentID: c.parentID,
sampled: c.sampled,
span: c.span,
baggage: newBaggage,
}
}
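A package-internal sketch (hypothetical helper, for illustration only) of the copy-on-write behavior above: the receiver is never mutated, so callers must keep the returned value.

package opentracing

import "fmt"

// exampleBaggage demonstrates that WithBaggageItem copies rather than
// mutates: c keeps its original baggage, only c2 sees the new pair.
func exampleBaggage(c SpanContext) {
	c2 := c.WithBaggageItem("user.id", "1234")
	c2.ForeachBaggageItem(func(k, v string) bool {
		fmt.Printf("%s=%s\n", k, v)
		return true // returning false stops the iteration early
	})
	_ = c // unchanged; the baggage maps are never shared
}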

View file

@ -1,4 +0,0 @@
// Package opentracing implements an OpenTracing (http://opentracing.io)
// compatible tracer. A Datadog tracer must be initialized through
// a Configuration object as you can see in the following examples.
package opentracing

View file

@ -1,125 +0,0 @@
package opentracing
import (
"strconv"
"strings"
ot "github.com/opentracing/opentracing-go"
)
// Propagator implementations should be able to inject and extract
// SpanContexts into and out of an implementation-specific carrier.
type Propagator interface {
// Inject takes the SpanContext and injects it into the carrier using
// an implementation specific method.
Inject(context ot.SpanContext, carrier interface{}) error
// Extract returns the SpanContext from the given carrier using an
// implementation specific method.
Extract(carrier interface{}) (ot.SpanContext, error)
}
const (
defaultBaggageHeaderPrefix = "ot-baggage-"
defaultTraceIDHeader = "x-datadog-trace-id"
defaultParentIDHeader = "x-datadog-parent-id"
)
// NewTextMapPropagator returns a new propagator which uses opentracing.TextMap
// to inject and extract values. The parameters specify the prefix that will
// be used to prefix baggage header keys along with the trace and parent header.
// Empty strings may be provided to use the defaults, which are: "ot-baggage-" as
// prefix for baggage headers, "x-datadog-trace-id" and "x-datadog-parent-id" for
// trace and parent ID headers.
func NewTextMapPropagator(baggagePrefix, traceHeader, parentHeader string) *TextMapPropagator {
if baggagePrefix == "" {
baggagePrefix = defaultBaggageHeaderPrefix
}
if traceHeader == "" {
traceHeader = defaultTraceIDHeader
}
if parentHeader == "" {
parentHeader = defaultParentIDHeader
}
return &TextMapPropagator{baggagePrefix, traceHeader, parentHeader}
}
// TextMapPropagator implements a propagator which uses opentracing.TextMap
// internally.
type TextMapPropagator struct {
baggagePrefix string
traceHeader string
parentHeader string
}
// Inject implements Propagator. It propagates SpanContext data out of
// the current process: the TraceID and the current active SpanID, as
// well as the Span baggage.
func (p *TextMapPropagator) Inject(context ot.SpanContext, carrier interface{}) error {
ctx, ok := context.(SpanContext)
if !ok {
return ot.ErrInvalidSpanContext
}
writer, ok := carrier.(ot.TextMapWriter)
if !ok {
return ot.ErrInvalidCarrier
}
// propagate the TraceID and the current active SpanID
writer.Set(p.traceHeader, strconv.FormatUint(ctx.traceID, 10))
writer.Set(p.parentHeader, strconv.FormatUint(ctx.spanID, 10))
// propagate OpenTracing baggage
for k, v := range ctx.baggage {
writer.Set(p.baggagePrefix+k, v)
}
return nil
}
// Extract implements Propagator.
func (p *TextMapPropagator) Extract(carrier interface{}) (ot.SpanContext, error) {
reader, ok := carrier.(ot.TextMapReader)
if !ok {
return nil, ot.ErrInvalidCarrier
}
var err error
var traceID, parentID uint64
decodedBaggage := make(map[string]string)
// extract SpanContext fields
err = reader.ForeachKey(func(k, v string) error {
switch strings.ToLower(k) {
case p.traceHeader:
traceID, err = strconv.ParseUint(v, 10, 64)
if err != nil {
return ot.ErrSpanContextCorrupted
}
case p.parentHeader:
parentID, err = strconv.ParseUint(v, 10, 64)
if err != nil {
return ot.ErrSpanContextCorrupted
}
default:
lowercaseK := strings.ToLower(k)
if strings.HasPrefix(lowercaseK, p.baggagePrefix) {
decodedBaggage[strings.TrimPrefix(lowercaseK, p.baggagePrefix)] = v
}
}
return nil
})
if err != nil {
return nil, err
}
if traceID == 0 || parentID == 0 {
return nil, ot.ErrSpanContextNotFound
}
return SpanContext{
traceID: traceID,
spanID: parentID,
baggage: decodedBaggage,
}, nil
}
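A hedged sketch of the round trip these two methods implement; ot.TextMapCarrier is the stock opentracing-go carrier, and the helper name is illustrative:

package main

import (
	ot "github.com/opentracing/opentracing-go"
)

// propagate injects a span context into plain map headers and extracts it
// back, the way an RPC client/server pair would across a process boundary.
func propagate(tracer ot.Tracer, span ot.Span) (ot.SpanContext, error) {
	carrier := ot.TextMapCarrier(map[string]string{})
	if err := tracer.Inject(span.Context(), ot.TextMap, carrier); err != nil {
		return nil, err
	}
	// carrier now holds x-datadog-trace-id, x-datadog-parent-id and any
	// ot-baggage-* keys (assuming the default header names above).
	return tracer.Extract(ot.TextMap, carrier)
}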

View file

@ -1,151 +0,0 @@
package opentracing
import (
"fmt"
"time"
ddtrace "github.com/DataDog/dd-trace-go/tracer"
ot "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
)
// Span represents an active, un-finished span in the OpenTracing system.
// Spans are created by the Tracer interface.
type Span struct {
*ddtrace.Span
context SpanContext
tracer *Tracer
}
// Tracer provides access to the `Tracer` that created this Span.
func (s *Span) Tracer() ot.Tracer {
return s.tracer
}
// Context yields the SpanContext for this Span. Note that the return
// value of Context() is still valid after a call to Span.Finish(), as is
// a call to Span.Context() after a call to Span.Finish().
func (s *Span) Context() ot.SpanContext {
return s.context
}
// SetBaggageItem sets a key:value pair on this Span and its SpanContext
// that also propagates to descendants of this Span.
func (s *Span) SetBaggageItem(key, val string) ot.Span {
s.Span.Lock()
defer s.Span.Unlock()
s.context = s.context.WithBaggageItem(key, val)
return s
}
// BaggageItem gets the value for a baggage item given its key. Returns the empty string
// if the value isn't found in this Span.
func (s *Span) BaggageItem(key string) string {
s.Span.Lock()
defer s.Span.Unlock()
return s.context.baggage[key]
}
// SetTag adds a tag to the span, overwriting pre-existing values for
// the given `key`.
func (s *Span) SetTag(key string, value interface{}) ot.Span {
switch key {
case ServiceName:
s.Span.Lock()
defer s.Span.Unlock()
s.Span.Service = fmt.Sprint(value)
case ResourceName:
s.Span.Lock()
defer s.Span.Unlock()
s.Span.Resource = fmt.Sprint(value)
case SpanType:
s.Span.Lock()
defer s.Span.Unlock()
s.Span.Type = fmt.Sprint(value)
case Error:
switch v := value.(type) {
case nil:
// no error
case error:
s.Span.SetError(v)
default:
s.Span.SetError(fmt.Errorf("%v", v))
}
default:
// NOTE: locking is not required because the `SetMeta` is
// already thread-safe
s.Span.SetMeta(key, fmt.Sprint(value))
}
return s
}
// FinishWithOptions is like Finish() but with explicit control over
// timestamps and log data.
func (s *Span) FinishWithOptions(options ot.FinishOptions) {
if options.FinishTime.IsZero() {
options.FinishTime = time.Now().UTC()
}
s.Span.FinishWithTime(options.FinishTime.UnixNano())
}
// SetOperationName sets or changes the operation name.
func (s *Span) SetOperationName(operationName string) ot.Span {
s.Span.Lock()
defer s.Span.Unlock()
s.Span.Name = operationName
return s
}
// LogFields is an efficient and type-checked way to record key:value
// logging data about a Span, though the programming interface is a little
// more verbose than LogKV().
func (s *Span) LogFields(fields ...log.Field) {
// TODO: implementation missing
}
// LogKV is a concise, readable way to record key:value logging data about
// a Span, though unfortunately this also makes it less efficient and less
// type-safe than LogFields().
func (s *Span) LogKV(keyVals ...interface{}) {
// TODO: implementation missing
}
// LogEvent is deprecated: use LogFields or LogKV
func (s *Span) LogEvent(event string) {
// TODO: implementation missing
}
// LogEventWithPayload is deprecated: use LogFields or LogKV
func (s *Span) LogEventWithPayload(event string, payload interface{}) {
// TODO: implementation missing
}
// Log is deprecated: use LogFields or LogKV
func (s *Span) Log(data ot.LogData) {
// TODO: implementation missing
}
// NewSpan is the OpenTracing Span constructor
func NewSpan(operationName string) *Span {
span := &ddtrace.Span{
Name: operationName,
}
otSpan := &Span{
Span: span,
context: SpanContext{
traceID: span.TraceID,
spanID: span.SpanID,
parentID: span.ParentID,
sampled: span.Sampled,
},
}
// SpanContext is propagated and used to create children
otSpan.context.span = otSpan
return otSpan
}
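To make the SetTag switch above concrete, a short illustrative sketch of which branch each key hits (the values are hypothetical):

package main

import (
	"errors"

	ot "github.com/opentracing/opentracing-go"
)

// tagSpan routes each key through the SetTag switch shown above.
func tagSpan(span ot.Span) {
	span.SetTag("service.name", "web-frontend")    // sets Span.Service
	span.SetTag("resource.name", "/user/{id}")     // sets Span.Resource
	span.SetTag("span.type", "web")                // sets Span.Type
	span.SetTag("error.error", errors.New("boom")) // marks the span as errored
	span.SetTag("custom.key", 42)                  // default case: stored via SetMeta
}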

View file

@ -1,12 +0,0 @@
package opentracing
const (
// SpanType defines the Span type (web, db, cache)
SpanType = "span.type"
// ServiceName defines the Service name for this Span
ServiceName = "service.name"
// ResourceName defines the Resource name for the Span
ResourceName = "resource.name"
// Error defines an error.
Error = "error.error"
)

View file

@ -1,178 +0,0 @@
package opentracing
import (
"errors"
"io"
"time"
ddtrace "github.com/DataDog/dd-trace-go/tracer"
ot "github.com/opentracing/opentracing-go"
)
// Tracer is a simple, thin interface for Span creation and SpanContext
// propagation. In the current state, this Tracer is a compatibility layer
// that wraps the Datadog Tracer implementation.
type Tracer struct {
// impl is the Datadog Tracer implementation.
impl *ddtrace.Tracer
// config holds the Configuration used to create the Tracer.
config *Configuration
}
// StartSpan creates, starts, and returns a new Span with the given `operationName`.
// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
// opentracing.FollowsFrom()) becomes the root of its own trace.
func (t *Tracer) StartSpan(operationName string, options ...ot.StartSpanOption) ot.Span {
sso := ot.StartSpanOptions{}
for _, o := range options {
o.Apply(&sso)
}
return t.startSpanWithOptions(operationName, sso)
}
func (t *Tracer) startSpanWithOptions(operationName string, options ot.StartSpanOptions) ot.Span {
if options.StartTime.IsZero() {
options.StartTime = time.Now().UTC()
}
var context SpanContext
var hasParent bool
var parent *Span
var span *ddtrace.Span
for _, ref := range options.References {
ctx, ok := ref.ReferencedContext.(SpanContext)
if !ok {
// ignore the SpanContext since it's not valid
continue
}
// if we have parenting, define it
if ref.Type == ot.ChildOfRef {
hasParent = true
context = ctx
parent = ctx.span
}
}
if parent == nil {
// create a root Span with the default service name and resource
span = t.impl.NewRootSpan(operationName, t.config.ServiceName, operationName)
if hasParent {
// the Context doesn't have a Span reference because it
// has been propagated from another process, so we set these
// values manually
span.TraceID = context.traceID
span.ParentID = context.spanID
t.impl.Sample(span)
}
} else {
// create a child Span that inherits from a parent
span = t.impl.NewChildSpan(operationName, parent.Span)
}
// create an OpenTracing compatible Span; the SpanContext has a
// back-reference that is used for parent-child hierarchy
otSpan := &Span{
Span: span,
context: SpanContext{
traceID: span.TraceID,
spanID: span.SpanID,
parentID: span.ParentID,
sampled: span.Sampled,
},
tracer: t,
}
otSpan.context.span = otSpan
// set start time
otSpan.Span.Start = options.StartTime.UnixNano()
if parent != nil {
// propagate baggage items
if l := len(parent.context.baggage); l > 0 {
otSpan.context.baggage = make(map[string]string, len(parent.context.baggage))
for k, v := range parent.context.baggage {
otSpan.context.baggage[k] = v
}
}
}
// add tags from options
for k, v := range options.Tags {
otSpan.SetTag(k, v)
}
// add global tags
for k, v := range t.config.GlobalTags {
otSpan.SetTag(k, v)
}
return otSpan
}
// Inject takes the `sm` SpanContext instance and injects it for
// propagation within `carrier`. The actual type of `carrier` depends on
// the value of `format`. Currently supported Injectors are:
// * `TextMap`
// * `HTTPHeaders`
func (t *Tracer) Inject(ctx ot.SpanContext, format interface{}, carrier interface{}) error {
switch format {
case ot.TextMap, ot.HTTPHeaders:
return t.config.TextMapPropagator.Inject(ctx, carrier)
}
return ot.ErrUnsupportedFormat
}
// Extract returns a SpanContext instance given `format` and `carrier`.
func (t *Tracer) Extract(format interface{}, carrier interface{}) (ot.SpanContext, error) {
switch format {
case ot.TextMap, ot.HTTPHeaders:
return t.config.TextMapPropagator.Extract(carrier)
}
return nil, ot.ErrUnsupportedFormat
}
// Close implements the `io.Closer` interface to gracefully shut down the
// Datadog Tracer. Note that this is a blocking operation that waits for the
// flushing goroutine.
func (t *Tracer) Close() error {
t.impl.Stop()
return nil
}
// NewTracer uses a Configuration object to initialize a Datadog Tracer.
// The initialization returns an `io.Closer` that can be used to gracefully
// shut down the tracer. If the configuration object defines a disabled
// Tracer, a no-op implementation is returned.
func NewTracer(config *Configuration) (ot.Tracer, io.Closer, error) {
if config.ServiceName == "" {
// abort initialization if a `ServiceName` is not defined
return nil, nil, errors.New("A Datadog Tracer requires a valid `ServiceName` set")
}
if !config.Enabled {
// return a no-op implementation so Datadog provides the minimum overhead
return &ot.NoopTracer{}, &noopCloser{}, nil
}
// configure a Datadog Tracer
transport := ddtrace.NewTransport(config.AgentHostname, config.AgentPort)
tracer := &Tracer{
impl: ddtrace.NewTracerTransport(transport),
config: config,
}
tracer.impl.SetDebugLogging(config.Debug)
tracer.impl.SetSampleRate(config.SampleRate)
// set the new Datadog Tracer as a `DefaultTracer` so it can be
// used in integrations. NOTE: this is a temporary implementation
// that can be removed once all integrations have been migrated
// to the OpenTracing API.
ddtrace.DefaultTracer = tracer.impl
return tracer, tracer, nil
}
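A minimal lifecycle sketch tying Configuration and NewTracer together; the service name and span name are illustrative, not taken from this repository:

package main

import (
	"log"

	ddot "github.com/DataDog/dd-trace-go/opentracing"
	ot "github.com/opentracing/opentracing-go"
)

func main() {
	cfg := ddot.NewConfiguration()
	cfg.ServiceName = "example-service" // required; NewTracer errors without it

	tracer, closer, err := ddot.NewTracer(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer closer.Close() // blocks until the flushing goroutine drains

	ot.SetGlobalTracer(tracer)
	span := tracer.StartSpan("web.request")
	defer span.Finish()
}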

View file

@ -1,127 +0,0 @@
package tracer
import (
"sync"
)
const (
// spanBufferDefaultInitSize is the initial size of our trace buffer,
// by default we allocate room for a handful of spans within the trace,
// which is reasonable since a span itself is much bigger, and this avoids
// re-allocating over and over. Could be fine-tuned at runtime.
spanBufferDefaultInitSize = 10
// spanBufferDefaultMaxSize is the maximum number of spans we keep in memory.
// This is to avoid memory leaks; above that value, spans are randomly
// dropped and ignored, resulting in corrupted tracing data but ensuring
// the original program continues to work as expected.
spanBufferDefaultMaxSize = 1e5
)
type spanBuffer struct {
channels tracerChans
spans []*Span
finishedSpans int
initSize int
maxSize int
sync.RWMutex
}
func newSpanBuffer(channels tracerChans, initSize, maxSize int) *spanBuffer {
if initSize <= 0 {
initSize = spanBufferDefaultInitSize
}
if maxSize <= 0 {
maxSize = spanBufferDefaultMaxSize
}
return &spanBuffer{
channels: channels,
initSize: initSize,
maxSize: maxSize,
}
}
func (tb *spanBuffer) Push(span *Span) {
if tb == nil {
return
}
tb.Lock()
defer tb.Unlock()
if len(tb.spans) > 0 {
// if spanBuffer is full, forget span
if len(tb.spans) >= tb.maxSize {
tb.channels.pushErr(&errorSpanBufFull{Len: len(tb.spans)})
return
}
// if there's a trace ID mismatch, ignore span
if tb.spans[0].TraceID != span.TraceID {
tb.channels.pushErr(&errorTraceIDMismatch{Expected: tb.spans[0].TraceID, Actual: span.TraceID})
return
}
}
if tb.spans == nil {
tb.spans = make([]*Span, 0, tb.initSize)
}
tb.spans = append(tb.spans, span)
}
func (tb *spanBuffer) flushable() bool {
tb.RLock()
defer tb.RUnlock()
if len(tb.spans) == 0 {
return false
}
return tb.finishedSpans == len(tb.spans)
}
func (tb *spanBuffer) ack() {
tb.Lock()
defer tb.Unlock()
tb.finishedSpans++
}
func (tb *spanBuffer) doFlush() {
if !tb.flushable() {
return
}
tb.Lock()
defer tb.Unlock()
tb.channels.pushTrace(tb.spans)
tb.spans = nil
tb.finishedSpans = 0 // important, because a buffer can be used for several flushes
}
func (tb *spanBuffer) Flush() {
if tb == nil {
return
}
tb.doFlush()
}
func (tb *spanBuffer) AckFinish() {
if tb == nil {
return
}
tb.ack()
tb.doFlush()
}
func (tb *spanBuffer) Len() int {
if tb == nil {
return 0
}
tb.RLock()
defer tb.RUnlock()
return len(tb.spans)
}
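A package-internal sketch (hypothetical helper) of the Push/AckFinish contract implied above: the trace reaches the channels only once every pushed span has been acked.

package tracer

// bufferLifecycle pushes spans (which must share one TraceID) and acks
// them one by one; the final AckFinish makes flushable() true and doFlush
// hands the whole trace to chans.pushTrace as a single slice.
func bufferLifecycle(chans tracerChans, spans []*Span) {
	buf := newSpanBuffer(chans, 0, 0) // zero values pick the defaults above
	for _, s := range spans {
		buf.Push(s)
	}
	for range spans {
		buf.AckFinish()
	}
}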

View file

@ -1,88 +0,0 @@
package tracer
const (
// traceChanLen is the capacity of the trace channel. This channel is emptied
// on a regular basis (worker thread) or when it reaches 50% of its capacity.
// If it's full, then data is simply dropped and ignored, with a log message.
// This only happens under heavy load.
traceChanLen = 1000
// serviceChanLen is the length of the service channel. As for the trace channel,
// it's emptied by the worker thread or when it reaches 50%. Note that there
// should be much less data here, as service data does not need to be updated
// that often.
serviceChanLen = 50
// errChanLen is the number of errors we keep in the error channel. When this
// one is full, new errors are simply dropped. By that point there is already
// a whole backlog of errors; a representative sample is enough, and we don't
// want to block user code or bloat memory and log files with redundant data.
errChanLen = 200
)
// tracerChans holds most tracer channels together; it's mostly used to
// pass them as a group to the span buffer/context. It is safe to access
// concurrently, as it contains only channels, and keeping it isolated from
// the tracer is convenient for unit testing.
type tracerChans struct {
trace chan []*Span
service chan Service
err chan error
traceFlush chan struct{}
serviceFlush chan struct{}
errFlush chan struct{}
}
func newTracerChans() tracerChans {
return tracerChans{
trace: make(chan []*Span, traceChanLen),
service: make(chan Service, serviceChanLen),
err: make(chan error, errChanLen),
traceFlush: make(chan struct{}, 1),
serviceFlush: make(chan struct{}, 1),
errFlush: make(chan struct{}, 1),
}
}
func (tc *tracerChans) pushTrace(trace []*Span) {
if len(tc.trace) >= cap(tc.trace)/2 { // starts being full, anticipate, try and flush soon
select {
case tc.traceFlush <- struct{}{}:
default: // a flush was already requested, skip
}
}
select {
case tc.trace <- trace:
default: // never block user code
tc.pushErr(&errorTraceChanFull{Len: len(tc.trace)})
}
}
func (tc *tracerChans) pushService(service Service) {
if len(tc.service) >= cap(tc.service)/2 { // starts being full, anticipate, try and flush soon
select {
case tc.serviceFlush <- struct{}{}:
default: // a flush was already requested, skip
}
}
select {
case tc.service <- service:
default: // never block user code
tc.pushErr(&errorServiceChanFull{Len: len(tc.service)})
}
}
func (tc *tracerChans) pushErr(err error) {
if len(tc.err) >= cap(tc.err)/2 { // starts being full, anticipate, try and flush soon
select {
case tc.errFlush <- struct{}{}:
default: // a flush was already requested, skip
}
}
select {
case tc.err <- err:
default:
// OK, if we get here, our error buffer is full; we can assume
// it is already filled with meaningful messages which are going
// to be logged and hopefully read. There is nothing better we
// can do, and blocking would make things worse.
}
}
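The three push methods above share one pattern; isolated here as a hedged generic sketch (hypothetical helper, not part of the original file):

package tracer

// flushAwareSend requests a flush once the channel is half full and never
// blocks the caller when it is completely full, mirroring pushTrace,
// pushService and pushErr above.
func flushAwareSend(data chan int, flush chan struct{}, v int) (dropped bool) {
	if len(data) >= cap(data)/2 {
		select {
		case flush <- struct{}{}:
		default: // a flush was already requested, skip
		}
	}
	select {
	case data <- v:
		return false
	default:
		return true // full: drop rather than block user code
	}
}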

View file

@ -1,42 +0,0 @@
package tracer
import (
"context"
)
var spanKey = "datadog_trace_span"
// ContextWithSpan will return a new context that includes the given span.
// DEPRECATED: use span.Context(ctx) instead.
func ContextWithSpan(ctx context.Context, span *Span) context.Context {
if span == nil {
return ctx
}
return span.Context(ctx)
}
// SpanFromContext returns the stored *Span from the Context if it's available.
// This helper also returns an ok value that is true if the span is present.
func SpanFromContext(ctx context.Context) (*Span, bool) {
if ctx == nil {
return nil, false
}
span, ok := ctx.Value(spanKey).(*Span)
return span, ok
}
// SpanFromContextDefault returns the stored *Span from the Context. If none is
// present, it returns an empty span whose methods do nothing.
func SpanFromContextDefault(ctx context.Context) *Span {
// FIXME[matt] is it better to return a singleton empty span?
if ctx == nil {
return &Span{}
}
span, ok := SpanFromContext(ctx)
if !ok {
return &Span{}
}
return span
}
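A short usage sketch for the helpers above (the handler name is illustrative): the span travels in the request context and is retrieved without any global state.

package main

import (
	"context"

	"github.com/DataDog/dd-trace-go/tracer"
)

// handleRequest retrieves the span stored upstream via span.Context(ctx).
func handleRequest(ctx context.Context) {
	span, ok := tracer.SpanFromContext(ctx)
	if !ok {
		return // no span attached to this request
	}
	span.SetMeta("request.handled", "true")

	// Or, when a no-op fallback is acceptable:
	tracer.SpanFromContextDefault(ctx).SetMeta("request.handled", "true")
}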

View file

@ -1,19 +0,0 @@
// Package tracer contains Datadog's tracing client. It is used to trace
// requests as they flow across web servers, databases and microservices so
// that developers have visibility into bottlenecks and troublesome
// requests.
//
// Package tracer has two core objects: Tracers and Spans. Spans represent
// a chunk of computation time. They have names, durations, timestamps and
// other metadata. Tracers are used to create hierarchies of spans in a
// request, buffer and submit them to the server.
//
// The tracing client can perform trace sampling. While the trace agent
// already samples traces to reduce bandwidth usage, client sampling reduces
// performance overhead.
//
// To enable APM and/or tracing of supported integrations, follow the instructions for
// the appropriate package: https://godoc.org/github.com/DataDog/dd-trace-go/tracer#pkg-subdirectories
//
// Sample code is available in the two examples below:
package tracer

View file

@ -1,114 +0,0 @@
package tracer
import (
"bytes"
"encoding/json"
"github.com/ugorji/go/codec"
)
const (
jsonContentType = "application/json"
msgpackContentType = "application/msgpack"
)
// Encoder is a generic interface that expects encoding methods for traces and
// services, and a Read() method that will be used by the HTTP handler.
type Encoder interface {
EncodeTraces(traces [][]*Span) error
EncodeServices(services map[string]Service) error
Read(p []byte) (int, error)
ContentType() string
}
var mh codec.MsgpackHandle
// msgpackEncoder encodes a list of traces in Msgpack format
type msgpackEncoder struct {
buffer *bytes.Buffer
encoder *codec.Encoder
contentType string
}
func newMsgpackEncoder() *msgpackEncoder {
buffer := &bytes.Buffer{}
encoder := codec.NewEncoder(buffer, &mh)
return &msgpackEncoder{
buffer: buffer,
encoder: encoder,
contentType: msgpackContentType,
}
}
// EncodeTraces serializes the given trace list into the internal buffer,
// returning the error if any.
func (e *msgpackEncoder) EncodeTraces(traces [][]*Span) error {
return e.encoder.Encode(traces)
}
// EncodeServices serializes a service map into the internal buffer.
func (e *msgpackEncoder) EncodeServices(services map[string]Service) error {
return e.encoder.Encode(services)
}
// Read reads encoded values from the internal buffer.
func (e *msgpackEncoder) Read(p []byte) (int, error) {
return e.buffer.Read(p)
}
// ContentType returns the msgpackEncoder content-type.
func (e *msgpackEncoder) ContentType() string {
return e.contentType
}
// jsonEncoder encodes a list of traces in JSON format
type jsonEncoder struct {
buffer *bytes.Buffer
encoder *json.Encoder
contentType string
}
// newJSONEncoder returns a new encoder for the JSON format.
func newJSONEncoder() *jsonEncoder {
buffer := &bytes.Buffer{}
encoder := json.NewEncoder(buffer)
return &jsonEncoder{
buffer: buffer,
encoder: encoder,
contentType: jsonContentType,
}
}
// EncodeTraces serializes the given trace list into the internal buffer,
// returning the error if any.
func (e *jsonEncoder) EncodeTraces(traces [][]*Span) error {
return e.encoder.Encode(traces)
}
// EncodeServices serializes a service map into the internal buffer.
func (e *jsonEncoder) EncodeServices(services map[string]Service) error {
return e.encoder.Encode(services)
}
// Read reads encoded values from the internal buffer.
func (e *jsonEncoder) Read(p []byte) (int, error) {
return e.buffer.Read(p)
}
// ContentType returns the jsonEncoder content-type.
func (e *jsonEncoder) ContentType() string {
return e.contentType
}
// encoderFactory will provide a new encoder each time we want to flush traces or services.
type encoderFactory func() Encoder
func jsonEncoderFactory() Encoder {
return newJSONEncoder()
}
func msgpackEncoderFactory() Encoder {
return newMsgpackEncoder()
}
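A package-internal sketch of the Encoder round trip (the helper name is illustrative): encode one batch of traces, then drain the buffer the way the HTTP transport would before POSTing to the agent.

package tracer

import "io/ioutil"

// encodeOnce serializes traces with the msgpack wire format and returns
// the raw payload plus the Content-Type header to send with it.
func encodeOnce(traces [][]*Span) ([]byte, string, error) {
	enc := msgpackEncoderFactory() // or jsonEncoderFactory() for debugging
	if err := enc.EncodeTraces(traces); err != nil {
		return nil, "", err
	}
	payload, err := ioutil.ReadAll(enc) // Encoder doubles as an io.Reader
	return payload, enc.ContentType(), err
}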

View file

@ -1,157 +0,0 @@
package tracer
import (
"log"
"strconv"
)
const (
errorPrefix = "Datadog Tracer Error: "
)
// errorSpanBufFull is raised when there's no more room in the buffer
type errorSpanBufFull struct {
// Len is the length of the buffer (which is full)
Len int
}
// Error provides a readable error message.
func (e *errorSpanBufFull) Error() string {
return "span buffer is full (length: " + strconv.Itoa(e.Len) + ")"
}
// errorTraceChanFull is raised when there's no more room in the channel
type errorTraceChanFull struct {
// Len is the length of the channel (which is full)
Len int
}
// Error provides a readable error message.
func (e *errorTraceChanFull) Error() string {
return "trace channel is full (length: " + strconv.Itoa(e.Len) + ")"
}
// errorServiceChanFull is raised when there's no more room in the channel
type errorServiceChanFull struct {
// Len is the length of the channel (which is full)
Len int
}
// Error provides a readable error message.
func (e *errorServiceChanFull) Error() string {
return "service channel is full (length: " + strconv.Itoa(e.Len) + ")"
}
// errorTraceIDMismatch is raised when trying to put a span in the wrong place.
type errorTraceIDMismatch struct {
// Expected is the trace ID we should have.
Expected uint64
// Actual is the trace ID we have and is wrong.
Actual uint64
}
// Error provides a readable error message.
func (e *errorTraceIDMismatch) Error() string {
return "trace ID mismatch (expected: " +
strconv.FormatUint(e.Expected, 16) +
" actual: " +
strconv.FormatUint(e.Actual, 16) +
")"
}
// errorNoSpanBuf is raised when trying to finish/push a span that has no buffer associated with it.
type errorNoSpanBuf struct {
// SpanName is the name of the span which could not be pushed (hint for the log reader).
SpanName string
}
// Error provides a readable error message.
func (e *errorNoSpanBuf) Error() string {
return "no span buffer (span name: '" + e.SpanName + "')"
}
// errorFlushLostTraces is raised when traces are lost during a flush.
type errorFlushLostTraces struct {
// Nb is the number of traces lost in that flush
Nb int
}
// Error provides a readable error message.
func (e *errorFlushLostTraces) Error() string {
return "unable to flush traces, lost " + strconv.Itoa(e.Nb) + " traces"
}
// errorFlushLostServices is raised when services are lost during a flush.
type errorFlushLostServices struct {
// Nb is the number of services lost in that flush
Nb int
}
// Error provides a readable error message.
func (e *errorFlushLostServices) Error() string {
return "unable to flush services, lost " + strconv.Itoa(e.Nb) + " services"
}
type errorSummary struct {
Count int
Example string
}
// errorKey returns a unique key for each error type
func errorKey(err error) string {
if err == nil {
return ""
}
switch err.(type) {
case *errorSpanBufFull:
return "ErrorSpanBufFull"
case *errorTraceChanFull:
return "ErrorTraceChanFull"
case *errorServiceChanFull:
return "ErrorServiceChanFull"
case *errorTraceIDMismatch:
return "ErrorTraceIDMismatch"
case *errorNoSpanBuf:
return "ErrorNoSpanBuf"
case *errorFlushLostTraces:
return "ErrorFlushLostTraces"
case *errorFlushLostServices:
return "ErrorFlushLostServices"
}
return err.Error() // possibly high cardinality, but this is unexpected
}
func aggregateErrors(errChan <-chan error) map[string]errorSummary {
errs := make(map[string]errorSummary, len(errChan))
for {
select {
case err := <-errChan:
if err != nil { // double-checking, we don't want to panic here...
key := errorKey(err)
summary := errs[key]
summary.Count++
summary.Example = err.Error()
errs[key] = summary
}
default: // stop when there's no more data
return errs
}
}
}
// logErrors logs the errors while preventing log file flooding: when there
// are many messages, it caps them and shows a quick summary.
// As of today it only logs using the standard Go log package, but
// later we could send those stats to the agent [TODO:christian].
func logErrors(errChan <-chan error) {
errs := aggregateErrors(errChan)
for _, v := range errs {
var repeat string
if v.Count > 1 {
repeat = " (repeated " + strconv.Itoa(v.Count) + " times)"
}
log.Println(errorPrefix + v.Example + repeat)
}
}
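A package-internal sketch (hypothetical helper) showing the aggregation behavior above: identical error types collapse to one log line with a repeat count.

package tracer

// demoLogErrors fills the channel with three identical errors; logErrors
// then prints a single line ending in "(repeated 3 times)".
func demoLogErrors() {
	errChan := make(chan error, errChanLen)
	for i := 0; i < 3; i++ {
		errChan <- &errorSpanBufFull{Len: 100}
	}
	logErrors(errChan)
}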

View file

@ -1,9 +0,0 @@
package ext
// Application types for services.
const (
AppTypeWeb = "web"
AppTypeDB = "db"
AppTypeCache = "cache"
AppTypeRPC = "rpc"
)

View file

@ -1,11 +0,0 @@
package ext
const (
CassandraType = "cassandra"
CassandraQuery = "cassandra.query"
CassandraConsistencyLevel = "cassandra.consistency_level"
CassandraCluster = "cassandra.cluster"
CassandraRowCount = "cassandra.row_count"
CassandraKeyspace = "cassandra.keyspace"
CassandraPaginated = "cassandra.paginated"
)

View file

@ -1,8 +0,0 @@
package ext
// Standard error message metadata fields.
const (
ErrorMsg = "error.msg"
ErrorType = "error.type"
ErrorStack = "error.stack"
)

View file

@ -1,9 +0,0 @@
package ext
// HTTP meta constants.
const (
HTTPType = "http"
HTTPMethod = "http.method"
HTTPCode = "http.status_code"
HTTPURL = "http.url"
)

View file

@ -1,6 +0,0 @@
package ext
const (
TargetHost = "out.host"
TargetPort = "out.port"
)

View file

@ -1,19 +0,0 @@
package ext
// Priority is a hint given to the backend so that it knows which traces to reject or keep.
// In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
const (
// PriorityUserReject informs the backend that a trace should be rejected and not stored.
// This should be used by user code overriding default priority.
PriorityUserReject = -1
// PriorityAutoReject informs the backend that a trace should be rejected and not stored.
// This is used by the builtin sampler.
PriorityAutoReject = 0
// PriorityAutoKeep informs the backend that a trace should be kept and stored.
// This is used by the builtin sampler.
PriorityAutoKeep = 1
// PriorityUserKeep informs the backend that a trace should be kept and stored.
// This should be used by user code overriding default priority.
PriorityUserKeep = 2
)

View file

@ -1,6 +0,0 @@
package ext
const (
SQLType = "sql"
SQLQuery = "sql.query"
)

View file

@ -1,7 +0,0 @@
package ext
// Standard system metadata names
const (
// The pid of the traced process
Pid = "system.pid"
)

View file

@ -1,14 +0,0 @@
package ext
import (
"runtime"
"strings"
)
const (
Lang = "go"
Interpreter = runtime.Compiler + "-" + runtime.GOARCH + "-" + runtime.GOOS
TracerVersion = "v0.5.0"
)
var LangVersion = strings.TrimPrefix(runtime.Version(), Lang)

View file

@ -1,50 +0,0 @@
package tracer
import (
cryptorand "crypto/rand"
"log"
"math"
"math/big"
"math/rand"
"sync"
"time"
)
// randGen is the global thread-safe random number generator
var randGen *rand.Rand
type randSource struct {
source rand.Source
sync.Mutex
}
func newRandSource() *randSource {
var seed int64
max := big.NewInt(math.MaxInt64)
n, err := cryptorand.Int(cryptorand.Reader, max)
if err == nil {
seed = n.Int64()
} else {
log.Printf("%scannot generate random seed: %v; using current time\n", errorPrefix, err)
seed = time.Now().UnixNano()
}
source := rand.NewSource(seed)
return &randSource{source: source}
}
func (rs *randSource) Int63() int64 {
rs.Lock()
n := rs.source.Int63()
rs.Unlock()
return n
}
func (rs *randSource) Seed(seed int64) {
rs.Lock()
rs.source.Seed(seed) // reseed the underlying source; calling rs.Seed here would recurse forever
rs.Unlock()
}
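For context, a sketch of the wiring this file implies (the function name is hypothetical): rand.Rand is not goroutine-safe by itself, so the locking lives in randSource and the package draws IDs from randGen.

package tracer

import "math/rand"

// seedRandGen builds the global generator on top of the locked source;
// randGen is presumably initialized like this elsewhere in the package.
func seedRandGen() {
	randGen = rand.New(newRandSource())
}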

View file

@ -1,57 +0,0 @@
package tracer
const (
// sampleRateMetricKey is the metric key holding the applied sample rate. Has to be the same as the Agent.
sampleRateMetricKey = "_sample_rate"
// constants used for the Knuth hashing, same constants as the Agent.
maxTraceID = ^uint64(0)
maxTraceIDFloat = float64(maxTraceID)
samplerHasher = uint64(1111111111111111111)
)
// sampler is the generic interface of any sampler
type sampler interface {
Sample(span *Span) // Tells if a trace is sampled and sets `span.Sampled`
}
// allSampler samples all the traces
type allSampler struct{}
func newAllSampler() *allSampler {
return &allSampler{}
}
// Sample samples a span
func (s *allSampler) Sample(span *Span) {
// Nothing to do here, since by default a trace is sampled
}
// rateSampler samples from a sample rate
type rateSampler struct {
SampleRate float64
}
// newRateSampler returns an initialized rateSampler with its sample rate
func newRateSampler(sampleRate float64) *rateSampler {
return &rateSampler{
SampleRate: sampleRate,
}
}
// Sample samples a span
func (s *rateSampler) Sample(span *Span) {
if s.SampleRate < 1 {
span.Sampled = sampleByRate(span.TraceID, s.SampleRate)
span.SetMetric(sampleRateMetricKey, s.SampleRate)
}
}
// sampleByRate tells if a trace (from its ID) with a given rate should be sampled.
// Its implementation has to be the same as the Trace Agent.
func sampleByRate(traceID uint64, sampleRate float64) bool {
if sampleRate < 1 {
return traceID*samplerHasher < uint64(sampleRate*maxTraceIDFloat)
}
return true
}
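A rough empirical check of the Knuth-style hashing in sampleByRate, as a hedged sketch (hypothetical helper, not part of the original file): with uniformly distributed trace IDs, about a quarter should survive a 0.25 rate.

package tracer

import (
	"fmt"
	"math/rand"
)

// sampleRateDemo counts how many random trace IDs a 0.25 rate keeps.
func sampleRateDemo() {
	rng := rand.New(rand.NewSource(1))
	kept, total := 0, 100000
	for i := 0; i < total; i++ {
		if sampleByRate(rng.Uint64(), 0.25) {
			kept++
		}
	}
	fmt.Printf("kept %d of %d trace IDs\n", kept, total) // roughly 25%
}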

View file

@ -1,338 +0,0 @@
package tracer
import (
"context"
"fmt"
"reflect"
"runtime/debug"
"strings"
"sync"
"time"
)
const (
errorMsgKey = "error.msg"
errorTypeKey = "error.type"
errorStackKey = "error.stack"
samplingPriorityKey = "_sampling_priority_v1"
)
// Span represents a computation. Callers must call Finish when a span is
// complete to ensure it's submitted.
//
// span := tracer.NewRootSpan("web.request", "datadog.com", "/user/{id}")
// defer span.Finish() // or FinishWithErr(err)
//
// In general, spans should be created with the tracer.NewSpan* functions,
// so they will be submitted on completion.
type Span struct {
// Name is the name of the operation being measured. Some examples
// might be "http.handler", "fileserver.upload" or "video.decompress".
// Name should be set on every span.
Name string `json:"name"`
// Service is the name of the process doing a particular job. Some
// examples might be "user-database" or "datadog-web-app". Services
// will be inherited from parents, so only set this in your app's
// top level span.
Service string `json:"service"`
// Resource is a query to a service. A web application might use
// resources like "/user/{user_id}". A sql database might use resources
// like "select * from user where id = ?".
//
// You can track thousands of resources (not millions or billions) so
// prefer normalized resources like "/user/{id}" to "/user/123".
//
// Resources should only be set on an app's top level spans.
Resource string `json:"resource"`
Type string `json:"type"` // protocol associated with the span
Start int64 `json:"start"` // span start time expressed in nanoseconds since epoch
Duration int64 `json:"duration"` // duration of the span expressed in nanoseconds
Meta map[string]string `json:"meta,omitempty"` // arbitrary map of metadata
Metrics map[string]float64 `json:"metrics,omitempty"` // arbitrary map of numeric metrics
SpanID uint64 `json:"span_id"` // identifier of this span
TraceID uint64 `json:"trace_id"` // identifier of the root span
ParentID uint64 `json:"parent_id"` // identifier of the span's direct parent
Error int32 `json:"error"` // error status of the span; 0 means no errors
Sampled bool `json:"-"` // if this span is sampled (and should be kept/recorded) or not
sync.RWMutex
tracer *Tracer // the tracer that generated this span
finished bool // true if the span has been submitted to a tracer.
// parent contains a link to the parent. In most cases, ParentID can be inferred from this.
// However, ParentID can technically be overridden (typical usage: distributed tracing)
// and also, parent == nil is used to identify root and top-level ("local root") spans.
parent *Span
buffer *spanBuffer
}
// NewSpan creates a new span. This is a low-level function, required for testing and advanced usage.
// Most of the time one should prefer the Tracer NewRootSpan or NewChildSpan methods.
func NewSpan(name, service, resource string, spanID, traceID, parentID uint64, tracer *Tracer) *Span {
return &Span{
Name: name,
Service: service,
Resource: resource,
Meta: tracer.getAllMeta(),
SpanID: spanID,
TraceID: traceID,
ParentID: parentID,
Start: now(),
Sampled: true,
tracer: tracer,
}
}
// setMeta adds an arbitrary meta field to the current Span. The span
// must be locked outside of this function.
func (s *Span) setMeta(key, value string) {
if s == nil {
return
}
// We don't lock spans when flushing, so we could have a data race when
// modifying a span as it's being flushed. This protects us against that
// race, since spans are marked `finished` before we flush them.
if s.finished {
return
}
if s.Meta == nil {
s.Meta = make(map[string]string)
}
s.Meta[key] = value
}
// SetMeta adds an arbitrary meta field to the current Span.
// If the Span has been finished, it will not be modified by the method.
func (s *Span) SetMeta(key, value string) {
if s == nil {
return
}
s.Lock()
defer s.Unlock()
s.setMeta(key, value)
}
// SetMetas adds arbitrary meta fields from a given map to the current Span.
// If the Span has been finished, it will not be modified by the method.
func (s *Span) SetMetas(metas map[string]string) {
for k, v := range metas {
s.SetMeta(k, v)
}
}
// GetMeta will return the value for the given tag or the empty string if it
// doesn't exist.
func (s *Span) GetMeta(key string) string {
if s == nil {
return ""
}
s.RLock()
defer s.RUnlock()
if s.Meta == nil {
return ""
}
return s.Meta[key]
}
// SetMetrics adds a metric field to the current Span.
// DEPRECATED: Use SetMetric
func (s *Span) SetMetrics(key string, value float64) {
if s == nil {
return
}
s.SetMetric(key, value)
}
// SetMetric sets a float64 value for the given key. It behaves like
// SetMeta, simply adding a tag without further processing.
// This method doesn't create a Datadog metric.
func (s *Span) SetMetric(key string, val float64) {
if s == nil {
return
}
s.Lock()
defer s.Unlock()
// We don't lock spans when flushing, so we could have a data race when
// modifying a span as it's being flushed. This protects us against that
// race, since spans are marked `finished` before we flush them.
if s.finished {
return
}
if s.Metrics == nil {
s.Metrics = make(map[string]float64)
}
s.Metrics[key] = val
}
// SetError stores an error object within the span meta. The Error status is
// updated and the error.Error() string is included with a default meta key.
// If the Span has been finished, it will not be modified by this method.
func (s *Span) SetError(err error) {
if err == nil || s == nil {
return
}
s.Lock()
defer s.Unlock()
// We don't lock spans when flushing, so we could have a data race when
// modifying a span as it's being flushed. This protects us against that
// race, since spans are marked `finished` before we flush them.
if s.finished {
return
}
s.Error = 1
s.setMeta(errorMsgKey, err.Error())
s.setMeta(errorTypeKey, reflect.TypeOf(err).String())
stack := debug.Stack()
s.setMeta(errorStackKey, string(stack))
}
// Finish closes this Span (but not its children), providing the duration
// of this part of the tracing session. It is idempotent: calling it
// multiple times is safe and doesn't update the current Span. Once a Span
// has been finished, methods that modify the Span become no-ops.
func (s *Span) Finish() {
s.finish(now())
}
// FinishWithTime closes this Span at the given `finishTime`. The
// behavior is the same as `Finish()`.
func (s *Span) FinishWithTime(finishTime int64) {
s.finish(finishTime)
}
func (s *Span) finish(finishTime int64) {
if s == nil {
return
}
s.Lock()
finished := s.finished
if !finished {
if s.Duration == 0 {
s.Duration = finishTime - s.Start
}
s.finished = true
}
s.Unlock()
if finished {
// no-op, called twice, no state change...
return
}
if s.buffer == nil {
if s.tracer != nil {
s.tracer.channels.pushErr(&errorNoSpanBuf{SpanName: s.Name})
}
return
}
// If the tracer is explicitly disabled, stop now
if s.tracer != nil && !s.tracer.Enabled() {
return
}
// If not sampled, drop it
if !s.Sampled {
return
}
s.buffer.AckFinish() // put data in channel only if trace is completely finished
// It's important that by the time Finish() returns, the data has actually
// been put in the channel (once the trace is complete). Otherwise, tests
// could become flaky because the state of the channel would be unknown.
}
// FinishWithErr marks a span finished and sets the given error if it's
// non-nil.
func (s *Span) FinishWithErr(err error) {
if s == nil {
return
}
s.SetError(err)
s.Finish()
}
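// Illustrative sketch: recording the outcome of a unit of work on a span.
// doWork stands in for hypothetical application code.
func runTracedJob(doWork func() error) {
span := NewRootSpan("worker.run", "worker", "job")
span.FinishWithErr(doWork())
}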
// String returns a human-readable representation of the span. Not for
// production, just debugging.
func (s *Span) String() string {
lines := []string{
fmt.Sprintf("Name: %s", s.Name),
fmt.Sprintf("Service: %s", s.Service),
fmt.Sprintf("Resource: %s", s.Resource),
fmt.Sprintf("TraceID: %d", s.TraceID),
fmt.Sprintf("SpanID: %d", s.SpanID),
fmt.Sprintf("ParentID: %d", s.ParentID),
fmt.Sprintf("Start: %s", time.Unix(0, s.Start)),
fmt.Sprintf("Duration: %s", time.Duration(s.Duration)),
fmt.Sprintf("Error: %d", s.Error),
fmt.Sprintf("Type: %s", s.Type),
"Tags:",
}
s.RLock()
for key, val := range s.Meta {
lines = append(lines, fmt.Sprintf("\t%s:%s", key, val))
}
s.RUnlock()
return strings.Join(lines, "\n")
}
// Context returns a copy of the given context that includes this span.
// This span can be accessed downstream with SpanFromContext and friends.
func (s *Span) Context(ctx context.Context) context.Context {
if s == nil {
return ctx
}
return context.WithValue(ctx, spanKey, s)
}
// Tracer returns the tracer that created this span.
func (s *Span) Tracer() *Tracer {
if s == nil {
return nil
}
return s.tracer
}
// SetSamplingPriority sets the sampling priority.
func (s *Span) SetSamplingPriority(priority int) {
s.SetMetric(samplingPriorityKey, float64(priority))
}
// HasSamplingPriority returns true if a sampling priority is set,
// whether its value is zero or non-zero.
func (s *Span) HasSamplingPriority() bool {
_, hasSamplingPriority := s.Metrics[samplingPriorityKey]
return hasSamplingPriority
}
// GetSamplingPriority gets the sampling priority.
func (s *Span) GetSamplingPriority() int {
return int(s.Metrics[samplingPriorityKey])
}
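// Illustrative sketch: propagating a sampling decision from a parent span
// to a child, as a distributed-tracing integration might do; the fallback
// priority of 1 is an arbitrary example value.
func propagatePriority(parent, child *Span) {
if parent.HasSamplingPriority() {
child.SetSamplingPriority(parent.GetSamplingPriority())
} else {
child.SetSamplingPriority(1)
}
}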
// NextSpanID returns a new random span id.
func NextSpanID() uint64 {
return uint64(randGen.Int63())
}

View file

@@ -1,10 +0,0 @@
// +build !windows
package tracer
import "time"
// now returns current UTC time in nanos.
func now() int64 {
return time.Now().UTC().UnixNano()
}

View file

@@ -1,34 +0,0 @@
package tracer
import (
"golang.org/x/sys/windows"
"log"
"time"
)
// highPrecisionNow is more precise than the go1.8 time.Now on Windows
// See https://msdn.microsoft.com/en-us/library/windows/desktop/hh706895(v=vs.85).aspx
// It is however ~10x slower and requires Windows 8+.
func highPrecisionNow() int64 {
var ft windows.Filetime
windows.GetSystemTimePreciseAsFileTime(&ft)
return ft.Nanoseconds()
}
func lowPrecisionNow() int64 {
return time.Now().UTC().UnixNano()
}
var now func() int64
// If GetSystemTimePreciseAsFileTime is not available we default to the less
// precise implementation based on time.Now()
func init() {
if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil {
log.Printf("Unable to load high precison timer, defaulting to time.Now()")
now = lowPrecisionNow
} else {
log.Printf("Using high precision timer")
now = highPrecisionNow
}
}

View file

@@ -1,429 +0,0 @@
package tracer
import (
"context"
"log"
"math/rand"
"os"
"strconv"
"sync"
"time"
"github.com/DataDog/dd-trace-go/tracer/ext"
)
const (
flushInterval = 2 * time.Second
)
func init() {
randGen = rand.New(newRandSource())
}
type Service struct {
Name string `json:"-"` // the internal name of the service (e.g. acme_search, datadog_web)
App string `json:"app"` // the name of the application (e.g. rails, postgres, custom-app)
AppType string `json:"app_type"` // the type of the application (e.g. db, web)
}
func (s Service) Equal(s2 Service) bool {
return s.Name == s2.Name && s.App == s2.App && s.AppType == s2.AppType
}
// Tracer creates, buffers and submits Spans which are used to time blocks of
// computation.
//
// When a tracer is disabled, it will not submit spans for processing.
type Tracer struct {
transport Transport // is the transport mechanism used to deliver spans to the agent
sampler sampler // is the trace sampler to only keep some samples
debugMu sync.RWMutex // protects the debugLoggingEnabled attribute while allowing concurrent reads
debugLoggingEnabled bool
enableMu sync.RWMutex
enabled bool // defines if the Tracer is enabled or not
meta map[string]string
metaMu sync.RWMutex
channels tracerChans
services map[string]Service // name -> service
exit chan struct{}
exitWG *sync.WaitGroup
forceFlushIn chan struct{}
forceFlushOut chan struct{}
}
// NewTracer creates a new Tracer. Most users should use the package's
// DefaultTracer instance.
func NewTracer() *Tracer {
return NewTracerTransport(newDefaultTransport())
}
// NewTracerTransport creates a new Tracer with the given transport.
func NewTracerTransport(transport Transport) *Tracer {
t := &Tracer{
enabled: true,
transport: transport,
sampler: newAllSampler(),
debugLoggingEnabled: false,
channels: newTracerChans(),
services: make(map[string]Service),
exit: make(chan struct{}),
exitWG: &sync.WaitGroup{},
forceFlushIn: make(chan struct{}, 0), // must be size 0 (blocking)
forceFlushOut: make(chan struct{}, 0), // must be size 0 (blocking)
}
// start a background worker
t.exitWG.Add(1)
go t.worker()
return t
}
// Stop stops the tracer.
func (t *Tracer) Stop() {
close(t.exit)
t.exitWG.Wait()
}
// SetEnabled will enable or disable the tracer.
func (t *Tracer) SetEnabled(enabled bool) {
t.enableMu.Lock()
defer t.enableMu.Unlock()
t.enabled = enabled
}
// Enabled returns whether or not a tracer is enabled.
func (t *Tracer) Enabled() bool {
t.enableMu.RLock()
defer t.enableMu.RUnlock()
return t.enabled
}
// SetSampleRate sets a sample rate for all the future traces.
// sampleRate has to be between 0.0 and 1.0 and represents the ratio of traces
// that will be sampled. 0.0 means that the tracer won't send any trace. 1.0
// means that the tracer will send all traces.
func (t *Tracer) SetSampleRate(sampleRate float64) {
if sampleRate == 1 {
t.sampler = newAllSampler()
} else if sampleRate >= 0 && sampleRate < 1 {
t.sampler = newRateSampler(sampleRate)
} else {
log.Printf("tracer.SetSampleRate rate must be between 0 and 1, now: %f", sampleRate)
}
}
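// Illustrative sketch: a tracer that keeps roughly 10% of traces and tags
// every span it creates; the "env" key and "staging" value are placeholders.
func newSampledTracer() *Tracer {
t := NewTracer()
t.SetSampleRate(0.1)
t.SetMeta("env", "staging")
return t
}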
// SetServiceInfo updates the application and application type for the given
// service.
func (t *Tracer) SetServiceInfo(name, app, appType string) {
t.channels.pushService(Service{
Name: name,
App: app,
AppType: appType,
})
}
// SetMeta adds an arbitrary meta field at the tracer level.
// This will append those tags to each span created by the tracer.
func (t *Tracer) SetMeta(key, value string) {
if t == nil { // Defensive, span could be initialized with nil tracer
return
}
t.metaMu.Lock()
if t.meta == nil {
t.meta = make(map[string]string)
}
t.meta[key] = value
t.metaMu.Unlock()
}
// getAllMeta returns all the meta set by this tracer.
// In most cases, it is nil.
func (t *Tracer) getAllMeta() map[string]string {
if t == nil { // Defensive, span could be initialized with nil tracer
return nil
}
var meta map[string]string
t.metaMu.RLock()
if t.meta != nil {
meta = make(map[string]string, len(t.meta))
for key, value := range t.meta {
meta[key] = value
}
}
t.metaMu.RUnlock()
return meta
}
// NewRootSpan creates a span with no parent. Its ids will be randomly
// assigned.
func (t *Tracer) NewRootSpan(name, service, resource string) *Span {
spanID := NextSpanID()
span := NewSpan(name, service, resource, spanID, spanID, 0, t)
span.buffer = newSpanBuffer(t.channels, 0, 0)
t.Sample(span)
// [TODO:christian] introduce distributed sampling here
span.buffer.Push(span)
// Add the process id to all root spans
span.SetMeta(ext.Pid, strconv.Itoa(os.Getpid()))
return span
}
// NewChildSpan returns a new span that is child of the Span passed as
// argument.
func (t *Tracer) NewChildSpan(name string, parent *Span) *Span {
spanID := NextSpanID()
// when we're using parenting in inner functions, it's possible that
// a nil pointer is sent to this function as argument. To prevent a crash,
// it's better to be defensive and to produce a wrongly configured span
// that is not sent to the trace agent.
if parent == nil {
span := NewSpan(name, "", name, spanID, spanID, spanID, t)
span.buffer = newSpanBuffer(t.channels, 0, 0)
t.Sample(span)
// [TODO:christian] introduce distributed sampling here
span.buffer.Push(span)
return span
}
parent.RLock()
// child that is correctly configured
span := NewSpan(name, parent.Service, name, spanID, parent.TraceID, parent.SpanID, parent.tracer)
// child sampling same as the parent
span.Sampled = parent.Sampled
if parent.HasSamplingPriority() {
span.SetSamplingPriority(parent.GetSamplingPriority())
}
span.parent = parent
span.buffer = parent.buffer
parent.RUnlock()
span.buffer.Push(span)
return span
}
// NewChildSpanFromContext will create a child span of the span contained in
// the given context. If the context contains no span, an empty span will be
// returned.
func (t *Tracer) NewChildSpanFromContext(name string, ctx context.Context) *Span {
span, _ := SpanFromContext(ctx) // tolerate nil spans
return t.NewChildSpan(name, span)
}
// NewChildSpanWithContext will create and return a child span of the span contained in the given
// context, as well as a copy of the parent context containing the created
// child span. If the context contains no span, an empty root span will be returned.
// If nil is passed in for the context, a context will be created.
func (t *Tracer) NewChildSpanWithContext(name string, ctx context.Context) (*Span, context.Context) {
span := t.NewChildSpanFromContext(name, ctx)
return span, span.Context(ctx)
}
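// Illustrative sketch: threading a span through context so nested calls
// pick it up; handleRequest and loadUser are hypothetical helpers.
func handleRequest(ctx context.Context) {
span, ctx := DefaultTracer.NewChildSpanWithContext("web.request", ctx)
defer span.Finish()
loadUser(ctx)
}
func loadUser(ctx context.Context) {
span := DefaultTracer.NewChildSpanFromContext("db.load_user", ctx)
defer span.Finish()
// ... perform the query here ...
}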
// SetDebugLogging will set the debug level
func (t *Tracer) SetDebugLogging(debug bool) {
t.debugMu.Lock()
defer t.debugMu.Unlock()
t.debugLoggingEnabled = debug
}
// DebugLoggingEnabled returns true if the debug level is enabled and false otherwise.
func (t *Tracer) DebugLoggingEnabled() bool {
t.debugMu.RLock()
defer t.debugMu.RUnlock()
return t.debugLoggingEnabled
}
func (t *Tracer) getTraces() [][]*Span {
traces := make([][]*Span, 0, len(t.channels.trace))
for {
select {
case trace := <-t.channels.trace:
traces = append(traces, trace)
default: // return when there's no more data
return traces
}
}
}
// flushTraces will push any currently buffered traces to the server.
func (t *Tracer) flushTraces() {
traces := t.getTraces()
if t.DebugLoggingEnabled() {
log.Printf("Sending %d traces", len(traces))
for _, trace := range traces {
if len(trace) > 0 {
log.Printf("TRACE: %d\n", trace[0].TraceID)
for _, span := range trace {
log.Printf("SPAN:\n%s", span.String())
}
}
}
}
// bail if there's nothing to do
if !t.Enabled() || t.transport == nil || len(traces) == 0 {
return
}
_, err := t.transport.SendTraces(traces)
if err != nil {
t.channels.pushErr(err)
t.channels.pushErr(&errorFlushLostTraces{Nb: len(traces)}) // explicit log messages with nb of lost traces
}
}
func (t *Tracer) updateServices() bool {
servicesModified := false
for {
select {
case service := <-t.channels.service:
if s, found := t.services[service.Name]; !found || !s.Equal(service) {
t.services[service.Name] = service
servicesModified = true
}
default: // return when there's no more data
return servicesModified
}
}
}
// flushServices will push any currently buffered services to the server.
func (t *Tracer) flushServices() {
servicesModified := t.updateServices()
if !t.Enabled() || !servicesModified {
return
}
_, err := t.transport.SendServices(t.services)
if err != nil {
t.channels.pushErr(err)
t.channels.pushErr(&errorFlushLostServices{Nb: len(t.services)}) // explicit log messages with nb of lost services
}
}
// flushErrs will process log messages that were queued
func (t *Tracer) flushErrs() {
logErrors(t.channels.err)
}
func (t *Tracer) flush() {
t.flushTraces()
t.flushServices()
t.flushErrs()
}
// ForceFlush forces a flush of data (traces and services) to the agent.
// Flushes are done by a background task on a regular basis, so you never
// need to call this manually, mostly useful for testing and debugging.
func (t *Tracer) ForceFlush() {
t.forceFlushIn <- struct{}{}
<-t.forceFlushOut
}
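// Illustrative sketch: a test helper that finishes a span and then blocks
// until the background worker has flushed, so a (hypothetical) test
// transport could assert on the delivered traces.
func finishAndFlush(t *Tracer, span *Span) {
span.Finish()
t.ForceFlush()
}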
// Sample samples a span with the internal sampler.
func (t *Tracer) Sample(span *Span) {
t.sampler.Sample(span)
}
// worker periodically flushes traces and services to the transport.
func (t *Tracer) worker() {
defer t.exitWG.Done()
flushTicker := time.NewTicker(flushInterval)
defer flushTicker.Stop()
for {
select {
case <-flushTicker.C:
t.flush()
case <-t.forceFlushIn:
t.flush()
t.forceFlushOut <- struct{}{} // caller blocked until this is done
case <-t.channels.traceFlush:
t.flushTraces()
case <-t.channels.serviceFlush:
t.flushServices()
case <-t.channels.errFlush:
t.flushErrs()
case <-t.exit:
t.flush()
return
}
}
}
// DefaultTracer is a global tracer that is enabled by default. All of the
// packages top level NewSpan functions use the default tracer.
//
// span := tracer.NewRootSpan("sql.query", "user-db", "select * from foo where id = ?")
// defer span.Finish()
//
var DefaultTracer = NewTracer()
// NewRootSpan creates a span with no parent. Its ids will be randomly
// assigned.
func NewRootSpan(name, service, resource string) *Span {
return DefaultTracer.NewRootSpan(name, service, resource)
}
// NewChildSpan creates a span that is a child of parent. It will inherit the
// parent's service and resource.
func NewChildSpan(name string, parent *Span) *Span {
return DefaultTracer.NewChildSpan(name, parent)
}
// NewChildSpanFromContext will create a child span of the span contained in
// the given context. If the context contains no span, a span with
// no service or resource will be returned.
func NewChildSpanFromContext(name string, ctx context.Context) *Span {
return DefaultTracer.NewChildSpanFromContext(name, ctx)
}
// NewChildSpanWithContext will create and return a child span of the span contained in the given
// context, as well as a copy of the parent context containing the created
// child span. If the context contains no span, an empty root span will be returned.
// If nil is passed in for the context, a context will be created.
func NewChildSpanWithContext(name string, ctx context.Context) (*Span, context.Context) {
return DefaultTracer.NewChildSpanWithContext(name, ctx)
}
// Enable will enable the default tracer.
func Enable() {
DefaultTracer.SetEnabled(true)
}
// Disable will disable the default tracer.
func Disable() {
DefaultTracer.SetEnabled(false)
}

View file

@@ -1,195 +0,0 @@
package tracer
import (
"errors"
"fmt"
"log"
"net/http"
"strconv"
"time"
"github.com/DataDog/dd-trace-go/tracer/ext"
)
const (
defaultHostname = "localhost"
defaultPort = "8126"
defaultHTTPTimeout = time.Second // defines the timeout before giving up on the send process
traceCountHeader = "X-Datadog-Trace-Count" // header containing the number of traces in the payload
)
// Transport is an interface for span submission to the agent.
type Transport interface {
SendTraces(spans [][]*Span) (*http.Response, error)
SendServices(services map[string]Service) (*http.Response, error)
SetHeader(key, value string)
}
// NewTransport returns a new Transport implementation that sends traces to a
// trace agent running on the given hostname and port. If the zero values for
// hostname and port are provided, the default values will be used ("localhost"
// for hostname, and "8126" for port).
//
// In general, using this method is only necessary if you have a trace agent
// running on a non-default port or if it's located on another machine.
func NewTransport(hostname, port string) Transport {
if hostname == "" {
hostname = defaultHostname
}
if port == "" {
port = defaultPort
}
return newHTTPTransport(hostname, port)
}
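// Illustrative sketch: wiring a tracer to an agent on another host;
// "trace-agent.internal" is a placeholder hostname.
func newRemoteTracer() *Tracer {
return NewTracerTransport(NewTransport("trace-agent.internal", "8126"))
}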
// newDefaultTransport returns a default transport for this tracing client
func newDefaultTransport() Transport {
return newHTTPTransport(defaultHostname, defaultPort)
}
type httpTransport struct {
traceURL string // the delivery URL for traces
legacyTraceURL string // the legacy delivery URL for traces
serviceURL string // the delivery URL for services
legacyServiceURL string // the legacy delivery URL for services
client *http.Client // the HTTP client used in the POST
headers map[string]string // the Transport headers
compatibilityMode bool // the Agent targets a legacy API for compatibility reasons
// [WARNING] We tried to reuse encoders via a pool, but that led to race conditions.
// When we send the encoder as the request body, the persistConn.writeLoop() goroutine
// can theoretically still read the underlying buffer after the encoder has been returned
// to the pool, since sending the request spawns a goroutine that keeps referencing the buffer.
// Because the underlying bytes.Buffer is not thread safe, this can make the app panic.
// That's why we prefer the less performant yet SAFE implementation of allocating a new
// encoder every time we flush.
getEncoder encoderFactory
}
// newHTTPTransport returns an httpTransport for the given endpoint
func newHTTPTransport(hostname, port string) *httpTransport {
// initialize the default headers that describe this tracer
defaultHeaders := map[string]string{
"Datadog-Meta-Lang": ext.Lang,
"Datadog-Meta-Lang-Version": ext.LangVersion,
"Datadog-Meta-Lang-Interpreter": ext.Interpreter,
"Datadog-Meta-Tracer-Version": ext.TracerVersion,
}
return &httpTransport{
traceURL: fmt.Sprintf("http://%s:%s/v0.3/traces", hostname, port),
legacyTraceURL: fmt.Sprintf("http://%s:%s/v0.2/traces", hostname, port),
serviceURL: fmt.Sprintf("http://%s:%s/v0.3/services", hostname, port),
legacyServiceURL: fmt.Sprintf("http://%s:%s/v0.2/services", hostname, port),
getEncoder: msgpackEncoderFactory,
client: &http.Client{
Timeout: defaultHTTPTimeout,
},
headers: defaultHeaders,
compatibilityMode: false,
}
}
func (t *httpTransport) SendTraces(traces [][]*Span) (*http.Response, error) {
if t.traceURL == "" {
return nil, errors.New("provided an empty URL, giving up")
}
encoder := t.getEncoder()
// encode the spans and return the error if any
err := encoder.EncodeTraces(traces)
if err != nil {
return nil, err
}
// prepare the client and send the payload
req, err := http.NewRequest("POST", t.traceURL, encoder)
if err != nil {
return nil, fmt.Errorf("cannot create http request: %v", err)
}
for header, value := range t.headers {
req.Header.Set(header, value)
}
req.Header.Set(traceCountHeader, strconv.Itoa(len(traces)))
req.Header.Set("Content-Type", encoder.ContentType())
response, err := t.client.Do(req)
// if we have an error, return an empty Response to protect against nil pointer dereference
if err != nil {
return &http.Response{StatusCode: 0}, err
}
defer response.Body.Close()
// if we got a 404 we should downgrade the API to a stable version (at most once)
if (response.StatusCode == 404 || response.StatusCode == 415) && !t.compatibilityMode {
log.Printf("calling the endpoint '%s' but received %d; downgrading the API\n", t.traceURL, response.StatusCode)
t.apiDowngrade()
return t.SendTraces(traces)
}
if sc := response.StatusCode; sc != 200 {
return response, fmt.Errorf("SendTraces expected response code 200, received %v", sc)
}
return response, err
}
func (t *httpTransport) SendServices(services map[string]Service) (*http.Response, error) {
if t.serviceURL == "" {
return nil, errors.New("provided an empty URL, giving up")
}
encoder := t.getEncoder()
if err := encoder.EncodeServices(services); err != nil {
return nil, err
}
// Send it
req, err := http.NewRequest("POST", t.serviceURL, encoder)
if err != nil {
return nil, fmt.Errorf("cannot create http request: %v", err)
}
for header, value := range t.headers {
req.Header.Set(header, value)
}
req.Header.Set("Content-Type", encoder.ContentType())
response, err := t.client.Do(req)
if err != nil {
return &http.Response{StatusCode: 0}, err
}
defer response.Body.Close()
// Downgrade if necessary
if (response.StatusCode == 404 || response.StatusCode == 415) && !t.compatibilityMode {
log.Printf("calling the endpoint '%s' but received %d; downgrading the API\n", t.traceURL, response.StatusCode)
t.apiDowngrade()
return t.SendServices(services)
}
if sc := response.StatusCode; sc != 200 {
return response, fmt.Errorf("SendServices expected response code 200, received %v", sc)
}
return response, err
}
// SetHeader sets the internal header for the httpTransport
func (t *httpTransport) SetHeader(key, value string) {
t.headers[key] = value
}
// changeEncoder switches the encoder so that a different API with different
// format can be targeted, preventing failures because of outdated agents
func (t *httpTransport) changeEncoder(encoderFactory encoderFactory) {
t.getEncoder = encoderFactory
}
// apiDowngrade downgrades the used encoder and API level. This method must fall back to a safe
// encoder and API, so that it will succeed regardless of the user's configuration. It also
// activates the compatibility mode so that the downgrade is executed only once.
func (t *httpTransport) apiDowngrade() {
t.compatibilityMode = true
t.traceURL = t.legacyTraceURL
t.serviceURL = t.legacyServiceURL
t.changeEncoder(jsonEncoderFactory)
}

View file

@@ -1,20 +0,0 @@
Copyright (c) 2013 Shopify
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@@ -1,119 +0,0 @@
package sarama
type Resource struct {
ResourceType AclResourceType
ResourceName string
}
func (r *Resource) encode(pe packetEncoder) error {
pe.putInt8(int8(r.ResourceType))
if err := pe.putString(r.ResourceName); err != nil {
return err
}
return nil
}
func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
r.ResourceType = AclResourceType(resourceType)
if r.ResourceName, err = pd.getString(); err != nil {
return err
}
return nil
}
type Acl struct {
Principal string
Host string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *Acl) encode(pe packetEncoder) error {
if err := pe.putString(a.Principal); err != nil {
return err
}
if err := pe.putString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
if a.Principal, err = pd.getString(); err != nil {
return err
}
if a.Host, err = pd.getString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}
type ResourceAcls struct {
Resource
Acls []*Acl
}
func (r *ResourceAcls) encode(pe packetEncoder) error {
if err := r.Resource.encode(pe); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Acls)); err != nil {
return err
}
for _, acl := range r.Acls {
if err := acl.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
if err := r.Resource.decode(pd, version); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Acls = make([]*Acl, n)
for i := 0; i < n; i++ {
r.Acls[i] = new(Acl)
if err := r.Acls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}

View file

@@ -1,76 +0,0 @@
package sarama
type CreateAclsRequest struct {
AclCreations []*AclCreation
}
func (c *CreateAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
return err
}
for _, aclCreation := range c.AclCreations {
if err := aclCreation.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreations = make([]*AclCreation, n)
for i := 0; i < n; i++ {
c.AclCreations[i] = new(AclCreation)
if err := c.AclCreations[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsRequest) key() int16 {
return 30
}
func (d *CreateAclsRequest) version() int16 {
return 0
}
func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreation struct {
Resource
Acl
}
func (a *AclCreation) encode(pe packetEncoder) error {
if err := a.Resource.encode(pe); err != nil {
return err
}
if err := a.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
if err := a.Resource.decode(pd, version); err != nil {
return err
}
if err := a.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

View file

@@ -1,88 +0,0 @@
package sarama
import "time"
type CreateAclsResponse struct {
ThrottleTime time.Duration
AclCreationResponses []*AclCreationResponse
}
func (c *CreateAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
return err
}
for _, aclCreationResponse := range c.AclCreationResponses {
if err := aclCreationResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreationResponses = make([]*AclCreationResponse, n)
for i := 0; i < n; i++ {
c.AclCreationResponses[i] = new(AclCreationResponse)
if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsResponse) key() int16 {
return 30
}
func (d *CreateAclsResponse) version() int16 {
return 0
}
func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreationResponse struct {
Err KError
ErrMsg *string
}
func (a *AclCreationResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(a.Err))
if err := pe.putNullableString(a.ErrMsg); err != nil {
return err
}
return nil
}
func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
if a.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

View file

@@ -1,48 +0,0 @@
package sarama
type DeleteAclsRequest struct {
Filters []*AclFilter
}
func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Filters)); err != nil {
return err
}
for _, filter := range d.Filters {
if err := filter.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.Filters = make([]*AclFilter, n)
for i := 0; i < n; i++ {
d.Filters[i] = new(AclFilter)
if err := d.Filters[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) key() int16 {
return 31
}
func (d *DeleteAclsRequest) version() int16 {
return 0
}
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,155 +0,0 @@
package sarama
import "time"
type DeleteAclsResponse struct {
ThrottleTime time.Duration
FilterResponses []*FilterResponse
}
func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
return err
}
for _, filterResponse := range a.FilterResponses {
if err := filterResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.FilterResponses = make([]*FilterResponse, n)
for i := 0; i < n; i++ {
a.FilterResponses[i] = new(FilterResponse)
if err := a.FilterResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) key() int16 {
return 31
}
func (d *DeleteAclsResponse) version() int16 {
return 0
}
func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type FilterResponse struct {
Err KError
ErrMsg *string
MatchingAcls []*MatchingAcl
}
func (f *FilterResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(f.Err))
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
return err
}
for _, matchingAcl := range f.MatchingAcls {
if err := matchingAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(kerr)
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
f.MatchingAcls = make([]*MatchingAcl, n)
for i := 0; i < n; i++ {
f.MatchingAcls[i] = new(MatchingAcl)
if err := f.MatchingAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
type MatchingAcl struct {
Err KError
ErrMsg *string
Resource
Acl
}
func (m *MatchingAcl) encode(pe packetEncoder) error {
pe.putInt16(int16(m.Err))
if err := pe.putNullableString(m.ErrMsg); err != nil {
return err
}
if err := m.Resource.encode(pe); err != nil {
return err
}
if err := m.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
m.Err = KError(kerr)
if m.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
if err := m.Resource.decode(pd, version); err != nil {
return err
}
if err := m.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

View file

@@ -1,25 +0,0 @@
package sarama
type DescribeAclsRequest struct {
AclFilter
}
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
return d.AclFilter.encode(pe)
}
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
return d.AclFilter.decode(pd, version)
}
func (d *DescribeAclsRequest) key() int16 {
return 29
}
func (d *DescribeAclsRequest) version() int16 {
return 0
}
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,80 +0,0 @@
package sarama
import "time"
type DescribeAclsResponse struct {
ThrottleTime time.Duration
Err KError
ErrMsg *string
ResourceAcls []*ResourceAcls
}
func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
pe.putInt16(int16(d.Err))
if err := pe.putNullableString(d.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
return err
}
for _, resourceAcl := range d.ResourceAcls {
if err := resourceAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
d.Err = KError(kerr)
errmsg, err := pd.getString()
if err != nil {
return err
}
if errmsg != "" {
d.ErrMsg = &errmsg
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.ResourceAcls = make([]*ResourceAcls, n)
for i := 0; i < n; i++ {
d.ResourceAcls[i] = new(ResourceAcls)
if err := d.ResourceAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) key() int16 {
return 29
}
func (d *DescribeAclsResponse) version() int16 {
return 0
}
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,61 +0,0 @@
package sarama
type AclFilter struct {
ResourceType AclResourceType
ResourceName *string
Principal *string
Host *string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *AclFilter) encode(pe packetEncoder) error {
pe.putInt8(int8(a.ResourceType))
if err := pe.putNullableString(a.ResourceName); err != nil {
return err
}
if err := pe.putNullableString(a.Principal); err != nil {
return err
}
if err := pe.putNullableString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
a.ResourceType = AclResourceType(resourceType)
if a.ResourceName, err = pd.getNullableString(); err != nil {
return err
}
if a.Principal, err = pd.getNullableString(); err != nil {
return err
}
if a.Host, err = pd.getNullableString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}

View file

@@ -1,42 +0,0 @@
package sarama
type AclOperation int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
AclOperationUnknown AclOperation = 0
AclOperationAny AclOperation = 1
AclOperationAll AclOperation = 2
AclOperationRead AclOperation = 3
AclOperationWrite AclOperation = 4
AclOperationCreate AclOperation = 5
AclOperationDelete AclOperation = 6
AclOperationAlter AclOperation = 7
AclOperationDescribe AclOperation = 8
AclOperationClusterAction AclOperation = 9
AclOperationDescribeConfigs AclOperation = 10
AclOperationAlterConfigs AclOperation = 11
AclOperationIdempotentWrite AclOperation = 12
)
type AclPermissionType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
AclPermissionUnknown AclPermissionType = 0
AclPermissionAny AclPermissionType = 1
AclPermissionDeny AclPermissionType = 2
AclPermissionAllow AclPermissionType = 3
)
type AclResourceType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
AclResourceUnknown AclResourceType = 0
AclResourceAny AclResourceType = 1
AclResourceTopic AclResourceType = 2
AclResourceGroup AclResourceType = 3
AclResourceCluster AclResourceType = 4
AclResourceTransactionalID AclResourceType = 5
)
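// Illustrative sketch: a filter matching every ACL on a single topic, as it
// could be embedded in a DescribeAclsRequest; "my-topic" is a placeholder.
func exampleTopicAclFilter() *DescribeAclsRequest {
topic := "my-topic"
return &DescribeAclsRequest{
AclFilter: AclFilter{
ResourceType: AclResourceTopic,
ResourceName: &topic,
Operation: AclOperationAny,
PermissionType: AclPermissionAny,
},
}
}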

View file

@@ -1,52 +0,0 @@
package sarama
type AddOffsetsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
GroupID string
}
func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putString(a.GroupID); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.GroupID, err = pd.getString(); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) key() int16 {
return 25
}
func (a *AddOffsetsToTxnRequest) version() int16 {
return 0
}
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,44 +0,0 @@
package sarama
import (
"time"
)
type AddOffsetsToTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
pe.putInt16(int16(a.Err))
return nil
}
func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
return nil
}
func (a *AddOffsetsToTxnResponse) key() int16 {
return 25
}
func (a *AddOffsetsToTxnResponse) version() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,76 +0,0 @@
package sarama
type AddPartitionsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TopicPartitions map[string][]int32
}
func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
return err
}
for topic, partitions := range a.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.TopicPartitions = make(map[string][]int32)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
a.TopicPartitions[topic] = partitions
}
return nil
}
func (a *AddPartitionsToTxnRequest) key() int16 {
return 24
}
func (a *AddPartitionsToTxnRequest) version() int16 {
return 0
}
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,108 +0,0 @@
package sarama
import (
"time"
)
type AddPartitionsToTxnResponse struct {
ThrottleTime time.Duration
Errors map[string][]*PartitionError
}
func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Errors)); err != nil {
return err
}
for topic, e := range a.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
a.Errors[topic][j] = new(PartitionError)
if err := a.Errors[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) key() int16 {
return 24
}
func (a *AddPartitionsToTxnResponse) version() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type PartitionError struct {
Partition int32
Err KError
}
func (p *PartitionError) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt16(int16(p.Err))
return nil
}
func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
p.Err = KError(kerr)
return nil
}

View file

@@ -1,120 +0,0 @@
package sarama
type AlterConfigsRequest struct {
Resources []*AlterConfigsResource
ValidateOnly bool
}
type AlterConfigsResource struct {
Type ConfigResourceType
Name string
ConfigEntries map[string]*string
}
func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(acr.Resources)); err != nil {
return err
}
for _, r := range acr.Resources {
if err := r.encode(pe); err != nil {
return err
}
}
pe.putBool(acr.ValidateOnly)
return nil
}
func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
resourceCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResource, resourceCount)
for i := range acr.Resources {
r := &AlterConfigsResource{}
err = r.decode(pd, version)
if err != nil {
return err
}
acr.Resources[i] = r
}
validateOnly, err := pd.getBool()
if err != nil {
return err
}
acr.ValidateOnly = validateOnly
return nil
}
func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
pe.putInt8(int8(ac.Type))
if err := pe.putString(ac.Name); err != nil {
return err
}
if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range ac.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
t, err := pd.getInt8()
if err != nil {
return err
}
ac.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
ac.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
ac.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return err
}
func (acr *AlterConfigsRequest) key() int16 {
return 33
}
func (acr *AlterConfigsRequest) version() int16 {
return 0
}
func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,95 +0,0 @@
package sarama
import "time"
type AlterConfigsResponse struct {
ThrottleTime time.Duration
Resources []*AlterConfigsResourceResponse
}
type AlterConfigsResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
}
func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(ct.Resources)); err != nil {
return err
}
for i := range ct.Resources {
pe.putInt16(ct.Resources[i].ErrorCode)
err := pe.putString(ct.Resources[i].ErrorMsg)
if err != nil {
return err
}
pe.putInt8(int8(ct.Resources[i].Type))
err = pe.putString(ct.Resources[i].Name)
if err != nil {
return err
}
}
return nil
}
func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
responseCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
for i := range acr.Resources {
acr.Resources[i] = new(AlterConfigsResourceResponse)
errCode, err := pd.getInt16()
if err != nil {
return err
}
acr.Resources[i].ErrorCode = errCode
e, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].ErrorMsg = e
t, err := pd.getInt8()
if err != nil {
return err
}
acr.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].Name = name
}
return nil
}
func (r *AlterConfigsResponse) key() int16 {
return 33
}
func (r *AlterConfigsResponse) version() int16 {
return 0
}
func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@@ -1,24 +0,0 @@
package sarama
type ApiVersionsRequest struct {
}
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ApiVersionsRequest) key() int16 {
return 18
}
func (r *ApiVersionsRequest) version() int16 {
return 0
}
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

View file

@@ -1,87 +0,0 @@
package sarama
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
MaxVersion int16
}
func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
pe.putInt16(b.ApiKey)
pe.putInt16(b.MinVersion)
pe.putInt16(b.MaxVersion)
return nil
}
func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
var err error
if b.ApiKey, err = pd.getInt16(); err != nil {
return err
}
if b.MinVersion, err = pd.getInt16(); err != nil {
return err
}
if b.MaxVersion, err = pd.getInt16(); err != nil {
return err
}
return nil
}
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
}
func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
return err
}
for _, apiVersion := range r.ApiVersions {
if err := apiVersion.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
for i := 0; i < numBlocks; i++ {
block := new(ApiVersionsResponseBlock)
if err := block.decode(pd); err != nil {
return err
}
r.ApiVersions[i] = block
}
return nil
}
func (r *ApiVersionsResponse) key() int16 {
return 18
}
func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

View file

@@ -1,921 +0,0 @@
package sarama
import (
"encoding/binary"
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer. The shutdown has completed
// when both the Errors and Successes channels have been closed. When calling
// AsyncClose, you *must* continue to read from those channels in order to
// drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
// Input is the input channel for the user to write messages to that they
// wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this
// channel or the Producer will deadlock when the channel is full. Alternatively,
// you can set Producer.Return.Errors in your config to false, which prevents
// errors from being returned.
Errors() <-chan *ProducerError
}
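// Illustrative sketch: producing while draining Errors(), per the contract
// above; the broker address and topic name are placeholders.
func produceLoop(messages <-chan string) error {
producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
if err != nil {
return err
}
defer producer.Close()
for text := range messages {
select {
case producer.Input() <- &ProducerMessage{Topic: "events", Value: StringEncoder(text)}:
case perr := <-producer.Errors():
Logger.Println("produce error:", perr)
}
}
return nil
}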
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
fin // final message from partitionProducer to brokerProducer and back
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
// The partitioning key for this message. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Key Encoder
// The actual message to store in Kafka. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Value Encoder
// The headers are key-value pairs that are transparently passed
// by Kafka between producers and consumers.
Headers []RecordHeader
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field; it is only to be used for
// pass-through data.
Metadata interface{}
// The fields below are filled in by the producer as the message is processed
// Offset is the offset of the message stored on the broker. This is only
// guaranteed to be defined if the message was successfully delivered and
// RequiredAcks is not NoResponse.
Offset int64
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp is the timestamp assigned to the message by the broker. This
// is only guaranteed to be defined if the message was successfully
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
// least version 0.10.0.
Timestamp time.Time
retries int
flags flagSet
}
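// Illustrative sketch: a message carrying pass-through Metadata that comes
// back on the Successes/Errors channels; requestID is a hypothetical field.
func newEventMessage(requestID string, payload []byte) *ProducerMessage {
return &ProducerMessage{
Topic: "events",
Key: StringEncoder(requestID),
Value: ByteEncoder(payload),
Metadata: requestID,
}
}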
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize(version int) int {
var size int
if version >= 2 {
size = maximumRecordOverhead
for _, h := range m.Headers {
size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
}
} else {
size = producerMessageOverhead
}
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
} else {
<-p.errors
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
version := 1
if p.conf.Version.IsAtLeast(V0_11_0_0) {
version = 2
}
if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
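// To make the scheme above concrete: suppose highWatermark is 2. Messages with
// retries == 2 flow through normally, while messages with retries < 2 are held
// in retryState[retries].buf. When the fin for level 2 arrives,
// flushRetryBuffers steps the watermark down and replays each buffered level in
// order (stopping early if a lower level still expects its own fin), so a
// retried message can never overtake an earlier message from the same partition.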
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
if msg.flags&fin == fin {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&fin == fin {
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a fin so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
return nil
})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
var (
input = make(chan *ProducerMessage)
bridge = make(chan *produceSet)
responses = make(chan *brokerProducerResponse)
)
bp := &brokerProducer{
parent: p,
broker: broker,
input: input,
output: bridge,
responses: responses,
buffer: newProduceSet(p),
currentRetries: make(map[string]map[int32]error),
}
go withRecover(bp.run)
// minimal bridge to make the network response `select`able
go withRecover(func() {
for set := range bridge {
request := set.buildRequest()
response, err := broker.Produce(request)
responses <- &brokerProducerResponse{
set: set,
err: err,
res: response,
}
}
close(responses)
})
return input
}
type brokerProducerResponse struct {
set *produceSet
err error
res *ProduceResponse
}
// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc.
type brokerProducer struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- *produceSet
responses <-chan *brokerProducerResponse
buffer *produceSet
timer <-chan time.Time
timerFired bool
closing error
currentRetries map[string]map[int32]error
}
func (bp *brokerProducer) run() {
var output chan<- *produceSet
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
for {
select {
case msg := <-bp.input:
if msg == nil {
bp.shutdown()
return
}
if msg.flags&syn == syn {
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
if bp.currentRetries[msg.Topic] == nil {
bp.currentRetries[msg.Topic] = make(map[int32]error)
}
bp.currentRetries[msg.Topic][msg.Partition] = nil
bp.parent.inFlight.Done()
continue
}
if reason := bp.needsRetry(msg); reason != nil {
bp.parent.retryMessage(msg, reason)
if bp.closing == nil && msg.flags&fin == fin {
// we were retrying this partition but we can start processing again
delete(bp.currentRetries[msg.Topic], msg.Partition)
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
}
continue
}
if bp.buffer.wouldOverflow(msg) {
if err := bp.waitForSpace(msg); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if err := bp.buffer.add(msg); err != nil {
bp.parent.returnError(msg, err)
continue
}
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
}
case <-bp.timer:
bp.timerFired = true
case output <- bp.buffer:
bp.rollOver()
case response := <-bp.responses:
bp.handleResponse(response)
}
if bp.timerFired || bp.buffer.readyToFlush() {
output = bp.output
} else {
output = nil
}
}
}
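// Note the nil-channel idiom in run above: while the buffer is not ready to
// flush, output is set to nil, and a send on a nil channel blocks forever, so
// select simply never picks the "output <- bp.buffer" case; once the buffer is
// flushable, output is pointed back at bp.output and that case becomes live.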
func (bp *brokerProducer) shutdown() {
for !bp.buffer.empty() {
select {
case response := <-bp.responses:
bp.handleResponse(response)
case bp.output <- bp.buffer:
bp.rollOver()
}
}
close(bp.output)
for response := range bp.responses {
bp.handleResponse(response)
}
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
if bp.closing != nil {
return bp.closing
}
return bp.currentRetries[msg.Topic][msg.Partition]
}
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
for {
select {
case response := <-bp.responses:
bp.handleResponse(response)
// handling a response can change our state, so re-check some things
if reason := bp.needsRetry(msg); reason != nil {
return reason
} else if !bp.buffer.wouldOverflow(msg) {
return nil
}
case bp.output <- bp.buffer:
bp.rollOver()
return nil
}
}
}
func (bp *brokerProducer) rollOver() {
bp.timer = nil
bp.timerFired = false
bp.buffer = newProduceSet(bp.parent)
}
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
if response.err != nil {
bp.handleError(response.set, response.err)
} else {
bp.handleSuccess(response.set, response.res)
}
if bp.buffer.empty() {
bp.rollOver() // this can happen if the response invalidated our buffer
}
}
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
bp.parent.returnSuccesses(msgs)
return
}
block := response.GetBlock(topic, partition)
if block == nil {
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
return
}
switch block.Err {
// Success
case ErrNoError:
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
for _, msg := range msgs {
msg.Timestamp = block.Timestamp
}
}
for i, msg := range msgs {
msg.Offset = block.Offset + int64(i)
}
bp.parent.returnSuccesses(msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
bp.currentRetries[topic][partition] = block.Err
bp.parent.retryMessages(msgs, block.Err)
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
// Other non-retriable errors
default:
bp.parent.returnErrors(msgs, block.Err)
}
})
}
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
switch err.(type) {
case PacketEncodingError:
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.returnErrors(msgs, err)
})
default:
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_ = bp.broker.Close()
bp.closing = err
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.rollOver()
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
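// Illustrative sketch, not part of the original file: the same unbounded-buffer
// bridge as retryHandler above, reduced to plain ints (hypothetical channels).
// Because the buffer absorbs input whenever the consumer is slow, the sender is
// never blocked, which is what breaks the retry-path deadlock.
func exampleInfiniteBridge(input <-chan int, output chan<- int) {
	buf := queue.New()
	for {
		if buf.Length() == 0 {
			v, ok := <-input
			if !ok {
				close(output)
				return
			}
			buf.Add(v)
			continue
		}
		select {
		case v, ok := <-input:
			if !ok {
				for buf.Length() > 0 { // drain what's left, then stop
					output <- buf.Remove().(int)
				}
				close(output)
				return
			}
			buf.Add(v)
		case output <- buf.Peek().(int): // offer the oldest buffered item
			buf.Remove()
		}
	}
}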
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.returnError(msg, err)
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.retryMessage(msg, err)
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}

@@ -1,883 +0,0 @@
package sarama
import (
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
id int32
addr string
rack *string
conf *Config
correlationID int32
conn net.Conn
connErr error
lock sync.Mutex
opened int32
responses chan responsePromise
done chan bool
incomingByteRate metrics.Meter
requestRate metrics.Meter
requestSize metrics.Histogram
requestLatency metrics.Histogram
outgoingByteRate metrics.Meter
responseRate metrics.Meter
responseSize metrics.Histogram
brokerIncomingByteRate metrics.Meter
brokerRequestRate metrics.Meter
brokerRequestSize metrics.Histogram
brokerRequestLatency metrics.Histogram
brokerOutgoingByteRate metrics.Meter
brokerResponseRate metrics.Meter
brokerResponseSize metrics.Histogram
}
type responsePromise struct {
requestTime time.Time
correlationID int32
packets chan []byte
errors chan error
}
// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr}
}
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
return ErrAlreadyConnected
}
if conf == nil {
conf = NewConfig()
}
err := conf.Validate()
if err != nil {
return err
}
b.lock.Lock()
go withRecover(func() {
defer b.lock.Unlock()
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
}
if conf.Net.TLS.Enable {
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
} else {
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
}
if b.connErr != nil {
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
b.conn = newBufConn(b.conn)
b.conf = conf
// Create or reuse the global metrics shared between brokers
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
// the same id (-1) and are already exposed through the global metrics above
if b.id >= 0 {
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
}
if conf.Net.SASL.Enable {
b.connErr = b.sendAndReceiveSASLPlainAuth()
if b.connErr != nil {
err = b.conn.Close()
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
}
b.done = make(chan bool)
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
if b.id >= 0 {
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
} else {
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
}
go withRecover(b.responseReceiver)
})
return nil
}
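// Illustrative sketch, not part of the original file: a fully synchronous open
// as described in the Open doc comment above (hypothetical address).
func exampleOpenBroker() error {
	broker := NewBroker("localhost:9092")
	if err := broker.Open(nil); err != nil { // nil conf => NewConfig()
		return err
	}
	// Open dials in the background; Connected blocks until the attempt
	// resolves and reports its outcome.
	connected, err := broker.Connected()
	if err != nil {
		return err
	}
	if connected {
		defer func() { _ = broker.Close() }()
		// ... issue requests here, e.g. broker.GetMetadata(&MetadataRequest{})
	}
	return nil
}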
// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.conn != nil, b.connErr
}
func (b *Broker) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
return ErrNotConnected
}
close(b.responses)
<-b.done
err := b.conn.Close()
b.conn = nil
b.connErr = nil
b.done = nil
b.responses = nil
if b.id >= 0 {
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
}
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
atomic.StoreInt32(&b.opened, 0)
return err
}
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
return b.id
}
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
return b.addr
}
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
response := new(MetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
response := new(ConsumerMetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
response := new(FindCoordinatorResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
response := new(OffsetResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
var response *ProduceResponse
var err error
if request.RequiredAcks == NoResponse {
err = b.sendAndReceive(request, nil)
} else {
response = new(ProduceResponse)
err = b.sendAndReceive(request, response)
}
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
response := new(FetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
response := new(OffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
response := new(OffsetFetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
response := new(JoinGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
response := new(SyncGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
response := new(LeaveGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
response := new(HeartbeatResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
response := new(ListGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
response := new(DescribeGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
response := new(CreatePartitionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
response := new(CreateTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
response := new(DeleteTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
response := new(DeleteRecordsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
response := new(DescribeAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
response := new(CreateAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
response := new(DeleteAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
response := new(InitProducerIDResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
response := new(AddPartitionsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
response := new(AddOffsetsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
response := new(EndTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
response := new(TxnOffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
response := new(DescribeConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
response := new(AlterConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
response := new(DeleteGroupsResponse)
if err := b.sendAndReceive(request, response); err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
if b.connErr != nil {
return nil, b.connErr
}
return nil, ErrNotConnected
}
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
return nil, ErrUnsupportedVersion
}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return nil, err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return nil, err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
return nil, err
}
b.correlationID++
if !promiseResponse {
// Record request latency without the response
b.updateRequestLatencyMetrics(time.Since(requestTime))
return nil, nil
}
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
b.responses <- promise
return &promise, nil
}
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
promise, err := b.send(req, res != nil)
if err != nil {
return err
}
if promise == nil {
return nil
}
select {
case buf := <-promise.packets:
return versionedDecode(buf, res, req.version())
case err = <-promise.errors:
return err
}
}
func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
b.id, err = pd.getInt32()
if err != nil {
return err
}
host, err := pd.getString()
if err != nil {
return err
}
port, err := pd.getInt32()
if err != nil {
return err
}
if version >= 1 {
b.rack, err = pd.getNullableString()
if err != nil {
return err
}
}
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
if _, _, err := net.SplitHostPort(b.addr); err != nil {
return err
}
return nil
}
func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
host, portstr, err := net.SplitHostPort(b.addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portstr)
if err != nil {
return err
}
pe.putInt32(b.id)
err = pe.putString(host)
if err != nil {
return err
}
pe.putInt32(int32(port))
if version >= 1 {
err = pe.putNullableString(b.rack)
if err != nil {
return err
}
}
return nil
}
func (b *Broker) responseReceiver() {
var dead error
header := make([]byte, 8)
for response := range b.responses {
if dead != nil {
response.errors <- dead
continue
}
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
if err != nil {
dead = err
response.errors <- err
continue
}
bytesReadHeader, err := io.ReadFull(b.conn, header)
requestLatency := time.Since(response.requestTime)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
decodedHeader := responseHeader{}
err = decode(header, &decodedHeader)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
if decodedHeader.correlationID != response.correlationID {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
// TODO if decoded ID < cur ID, discard until we catch up
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
response.errors <- dead
continue
}
buf := make([]byte, decodedHeader.length-4)
bytesReadBody, err := io.ReadFull(b.conn, buf)
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
if err != nil {
dead = err
response.errors <- err
continue
}
response.packets <- buf
}
close(b.done)
}
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
rb := &SaslHandshakeRequest{"PLAIN"}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
return err
}
b.correlationID++
// wait for the response
header := make([]byte, 8) // response header
_, err = io.ReadFull(b.conn, header)
if err != nil {
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
return err
}
length := binary.BigEndian.Uint32(header[:4])
payload := make([]byte, length-4)
n, err := io.ReadFull(b.conn, payload)
if err != nil {
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
return err
}
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
res := &SaslHandshakeResponse{}
err = versionedDecode(payload, res, 0)
if err != nil {
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
return err
}
if res.Err != ErrNoError {
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
return res.Err
}
Logger.Print("Successful SASL handshake")
return nil
}
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
//
// In SASL Plain, Kafka expects the auth header to be in the following format
// Message format (from https://tools.ietf.org/html/rfc4616):
//
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
// authcid = 1*SAFE ; MUST accept up to 255 octets
// authzid = 1*SAFE ; MUST accept up to 255 octets
// passwd = 1*SAFE ; MUST accept up to 255 octets
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
//
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
// ;; any UTF-8 encoded Unicode character except NUL
//
// When credentials are valid, Kafka returns a 4 byte array of null characters.
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
// of responding to bad credentials, but that's how it's done today.
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
if b.conf.Net.SASL.Handshake {
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
if handshakeErr != nil {
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
return handshakeErr
}
}
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
authBytes := make([]byte, length+4) // 4-byte length header + auth data
binary.BigEndian.PutUint32(authBytes, uint32(length))
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
return err
}
requestTime := time.Now()
bytesWritten, err := b.conn.Write(authBytes)
b.updateOutgoingCommunicationMetrics(bytesWritten)
if err != nil {
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
return err
}
header := make([]byte, 4)
n, err := io.ReadFull(b.conn, header)
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
// If the credentials are valid, we would get a 4 byte response filled with null characters.
// Otherwise, the broker closes the connection and we get an EOF
if err != nil {
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
return nil
}
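// For illustration: with user "alice" and password "secret" (hypothetical
// credentials), the payload built above is the RFC 4616 message
// "\x00alice\x00secret" (13 bytes), preceded by the 4-byte big-endian length
// header 0x0000000D.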
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
b.updateRequestLatencyMetrics(requestLatency)
b.responseRate.Mark(1)
if b.brokerResponseRate != nil {
b.brokerResponseRate.Mark(1)
}
responseSize := int64(bytes)
b.incomingByteRate.Mark(responseSize)
if b.brokerIncomingByteRate != nil {
b.brokerIncomingByteRate.Mark(responseSize)
}
b.responseSize.Update(responseSize)
if b.brokerResponseSize != nil {
b.brokerResponseSize.Update(responseSize)
}
}
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
requestLatencyInMs := int64(requestLatency / time.Millisecond)
b.requestLatency.Update(requestLatencyInMs)
if b.brokerRequestLatency != nil {
b.brokerRequestLatency.Update(requestLatencyInMs)
}
}
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
b.requestRate.Mark(1)
if b.brokerRequestRate != nil {
b.brokerRequestRate.Mark(1)
}
requestSize := int64(bytes)
b.outgoingByteRate.Mark(requestSize)
if b.brokerOutgoingByteRate != nil {
b.brokerOutgoingByteRate.Mark(requestSize)
}
b.requestSize.Update(requestSize)
if b.brokerRequestSize != nil {
b.brokerRequestSize.Update(requestSize)
}
}

@@ -1,846 +0,0 @@
package sarama
import (
"math/rand"
"sort"
"sync"
"time"
)
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users; however, Kafka will process requests from a single client strictly serially,
// so it is generally more efficient to use the default of one client per producer/consumer.
type Client interface {
// Config returns the Config struct of the client. This struct should not be
// altered after it has been created.
Config() *Config
// Controller returns the cluster controller broker.
Controller() (*Broker, error)
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
// Topics returns the set of available topics as retrieved from cluster metadata.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
Partitions(topic string) ([]int32, error)
// WritablePartitions returns the sorted list of all writable partition IDs for
// the given topic, where "writable" means "having a valid leader accepting
// writes".
WritablePartitions(topic string) ([]int32, error)
// Leader returns the broker object that is the leader of the current
// topic/partition, as determined by querying the cluster metadata.
Leader(topic string, partitionID int32) (*Broker, error)
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time (in milliseconds) on the topic/partition combination.
// Time should be OffsetOldest for the earliest available offset,
// OffsetNewest for the offset of the message that will be produced next, or a specific time in milliseconds.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will
// return a locally cached value if it's available. You can call
// RefreshCoordinator to update the cached value. This function only works on
// Kafka 0.8.2 and higher.
Coordinator(consumerGroup string) (*Broker, error)
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
// in local cache. This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// Close shuts down all broker connections managed by this client. It is required
// to call this function before a client object passes out of scope, as it will
// otherwise leak memory. You must close any Producers or Consumers using a client
// before you close the client.
Close() error
// Closed returns true if the client has already had Close called on it
Closed() bool
}
const (
// OffsetNewest stands for the log head offset, i.e. the offset that will be
// assigned to the next message that will be produced to the partition. You
// can send this to a client's GetOffset method to get this offset, or when
// calling ConsumePartition to start consuming new messages.
OffsetNewest int64 = -1
// OffsetOldest stands for the oldest offset available on the broker for a
// partition. You can send this to a client's GetOffset method to get this
// offset, or when calling ConsumePartition to start consuming from the
// oldest offset that is still available on the broker.
OffsetOldest int64 = -2
)
type client struct {
conf *Config
closer, closed chan none // for shutting down background metadata updater
// the broker addresses given to us through the constructor are not guaranteed to be returned in
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
// so we store them separately
seedBrokers []*Broker
deadSeeds []*Broker
controllerID int32 // cluster controller broker id
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
cachedPartitionsResults map[string][maxPartitionIndex][]int32
lock sync.RWMutex // protects access to the maps that hold cluster state.
}
// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
Logger.Println("Initializing new client")
if conf == nil {
conf = NewConfig()
}
if err := conf.Validate(); err != nil {
return nil, err
}
if len(addrs) < 1 {
return nil, ConfigurationError("You must provide at least one broker address")
}
client := &client{
conf: conf,
closer: make(chan none),
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, index := range random.Perm(len(addrs)) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
}
go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client")
return client, nil
}
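// Illustrative sketch, not part of the original file: a typical client
// lifecycle per the Client doc comment (hypothetical address and topic).
func exampleNewClient() error {
	client, err := NewClient([]string{"localhost:9092"}, nil) // nil conf => NewConfig()
	if err != nil {
		return err
	}
	defer func() { _ = client.Close() }() // required; clients are not garbage-collected
	partitions, err := client.Partitions("events")
	if err != nil {
		return err
	}
	for _, p := range partitions {
		// OffsetNewest resolves to the offset the next produced message will get.
		offset, err := client.GetOffset("events", p, OffsetNewest)
		if err != nil {
			return err
		}
		Logger.Printf("partition %d: next offset %d", p, offset)
	}
	return nil
}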
func (client *client) Config() *Config {
return client.conf
}
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
// so we go ahead and log the event in this case.
Logger.Printf("Close() called on already closed client")
return ErrClosedClient
}
// shutdown and wait for the background thread before we take the lock, to avoid races
close(client.closer)
<-client.closed
client.lock.Lock()
defer client.lock.Unlock()
Logger.Println("Closing Client")
for _, broker := range client.brokers {
safeAsyncClose(broker)
}
for _, broker := range client.seedBrokers {
safeAsyncClose(broker)
}
client.brokers = nil
client.metadata = nil
return nil
}
func (client *client) Closed() bool {
return client.brokers == nil
}
func (client *client) Topics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadata))
for topic := range client.metadata {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, allPartitions)
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) WritablePartitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, writablePartitions)
// len==0 catches when it's nil (no such topic) and the odd case when every single
// partition is undergoing leader election simultaneously. Callers have to be able to handle
// this function returning an empty slice (which is a valid return value) but catching it
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
// a metadata refresh as a nicety so callers can just try again and don't have to manually
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, writablePartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return dupInt32Slice(metadata.Replicas), metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return dupInt32Slice(metadata.Isr), metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
leader, err := client.cachedLeader(topic, partitionID)
if leader == nil {
err = client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
leader, err = client.cachedLeader(topic, partitionID)
}
return leader, err
}
func (client *client) RefreshMetadata(topics ...string) error {
if client.Closed() {
return ErrClosedClient
}
// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning a proper
// error. This handles that case by returning an error instead of sending the request
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
for _, topic := range topics {
if len(topic) == 0 {
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
if client.Closed() {
return -1, ErrClosedClient
}
offset, err := client.getOffset(topic, partitionID, time)
if err != nil {
if err := client.RefreshMetadata(topic); err != nil {
return -1, err
}
return client.getOffset(topic, partitionID, time)
}
return offset, err
}
func (client *client) Controller() (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
controller := client.cachedController()
if controller == nil {
if err := client.refreshMetadata(); err != nil {
return nil, err
}
controller = client.cachedController()
}
if controller == nil {
return nil, ErrControllerNotAvailable
}
_ = controller.Open(client.conf)
return controller, nil
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
coordinator := client.cachedCoordinator(consumerGroup)
if coordinator == nil {
if err := client.RefreshCoordinator(consumerGroup); err != nil {
return nil, err
}
coordinator = client.cachedCoordinator(consumerGroup)
}
if coordinator == nil {
return nil, ErrConsumerCoordinatorNotAvailable
}
_ = coordinator.Open(client.conf)
return coordinator, nil
}
func (client *client) RefreshCoordinator(consumerGroup string) error {
if client.Closed() {
return ErrClosedClient
}
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
if err != nil {
return err
}
client.lock.Lock()
defer client.lock.Unlock()
client.registerBroker(response.Coordinator)
client.coordinators[consumerGroup] = response.Coordinator.ID()
return nil
}
// private broker management helpers
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID is already registered, it is kept unless its
// address is stale, in which case it is replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
client.lock.Lock()
defer client.lock.Unlock()
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
client.deadSeeds = append(client.deadSeeds, broker)
client.seedBrokers = client.seedBrokers[1:]
} else {
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
// but we really shouldn't have to; once that loop is made better this case can be
// removed, and the function generally can be renamed from `deregisterBroker` to
// `nextSeedBroker` or something
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
delete(client.brokers, broker.ID())
}
}
func (client *client) resurrectDeadBrokers() {
client.lock.Lock()
defer client.lock.Unlock()
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
client.deadSeeds = nil
}
func (client *client) any() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if len(client.seedBrokers) > 0 {
_ = client.seedBrokers[0].Open(client.conf)
return client.seedBrokers[0]
}
// not guaranteed to be random *or* deterministic
for _, broker := range client.brokers {
_ = broker.Open(client.conf)
return broker
}
return nil
}
// private caching/lazy metadata helpers
type partitionType int
const (
allPartitions partitionType = iota
writablePartitions
// If you add any more types, update the partition cache in update()
// Ensure this is the last partition type value
maxPartitionIndex
)
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
return partitions[partitionID]
}
return nil
}
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
client.lock.RLock()
defer client.lock.RUnlock()
partitions, exists := client.cachedPartitionsResults[topic]
if !exists {
return nil
}
return partitions[partitionSet]
}
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
partitions := client.metadata[topic]
if partitions == nil {
return nil
}
ret := make([]int32, 0, len(partitions))
for _, partition := range partitions {
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
continue
}
ret = append(ret, partition.ID)
}
sort.Sort(int32Slice(ret))
return ret
}
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
metadata, ok := partitions[partitionID]
if ok {
if metadata.Err == ErrLeaderNotAvailable {
return nil, ErrLeaderNotAvailable
}
b := client.brokers[metadata.Leader]
if b == nil {
return nil, ErrLeaderNotAvailable
}
_ = b.Open(client.conf)
return b, nil
}
}
return nil, ErrUnknownTopicOrPartition
}
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
broker, err := client.Leader(topic, partitionID)
if err != nil {
return -1, err
}
request := &OffsetRequest{}
if client.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 1
}
request.AddBlock(topic, partitionID, time, 1)
response, err := broker.GetAvailableOffsets(request)
if err != nil {
_ = broker.Close()
return -1, err
}
block := response.GetBlock(topic, partitionID)
if block == nil {
_ = broker.Close()
return -1, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return -1, block.Err
}
if len(block.Offsets) != 1 {
return -1, ErrOffsetOutOfRange
}
return block.Offsets[0], nil
}
// core metadata update logic
func (client *client) backgroundMetadataUpdater() {
defer close(client.closed)
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
return
}
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := client.refreshMetadata(); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
return
}
}
}
func (client *client) refreshMetadata() error {
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
return err
} else if len(specificTopics) == 0 {
return ErrNoTopicsToUpdateMetadata
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
return err
}
return nil
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
req := &MetadataRequest{Topics: topics}
if client.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 1
}
response, err := broker.GetMetadata(req)
switch err.(type) {
case nil:
allKnownMetaData := len(topics) == 0
// valid response, use it
shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
}
return err
case PacketEncodingError:
// didn't even send, return the error
return err
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
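// Illustrative sketch (not part of the original file): the recursive retry
// closure above amounts to a fixed-backoff retry budget. The same shape as a
// hypothetical standalone helper:
//
//	func withRetries(attempts int, backoff time.Duration, op func() error) error {
//		err := op()
//		for ; err != nil && attempts > 0; attempts-- {
//			time.Sleep(backoff)
//			err = op()
//		}
//		return err
//	}
//
// tryRefreshMetadata layers broker rotation on top of this, backing off only
// on retryable outcomes (leaderless partitions, running out of brokers).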
// if no fatal error, returns whether the caller should retry because some partitions were reported leaderless (ErrLeaderNotAvailable)
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
client.controllerID = data.ControllerID
if allKnownMetaData {
client.metadata = make(map[string]map[int32]*PartitionMetadata)
client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
}
for _, topic := range data.Topics {
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
err = topic.Err
continue
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
err = topic.Err
retry = true
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
continue
}
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
for _, partition := range topic.Partitions {
client.metadata[topic.Name][partition.ID] = partition
if partition.Err == ErrLeaderNotAvailable {
retry = true
}
}
var partitionCache [maxPartitionIndex][]int32
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
client.cachedPartitionsResults[topic.Name] = partitionCache
}
return
}
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
return client.brokers[coordinatorID]
}
return nil
}
func (client *client) cachedController() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
return client.brokers[client.controllerID]
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
retry := func(err error) (*FindCoordinatorResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
}
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
request := new(FindCoordinatorRequest)
request.CoordinatorKey = consumerGroup
request.CoordinatorType = CoordinatorGroup
response, err := broker.FindCoordinator(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
switch err.(type) {
case PacketEncodingError:
return nil, err
default:
_ = broker.Close()
client.deregisterBroker(broker)
continue
}
}
switch response.Err {
case ErrNoError:
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
return response, nil
case ErrConsumerCoordinatorNotAvailable:
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
// This is very ugly, but this scenario will only happen once per cluster.
// The __consumer_offsets topic only has to be created one time.
// The number of partitions is not configurable, but partition 0 should always exist.
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
time.Sleep(2 * time.Second)
}
return retry(ErrConsumerCoordinatorNotAvailable)
default:
return nil, response.Err
}
}
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
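// Illustrative sketch (not part of the original file): getConsumerMetadata
// backs the exported Client.Coordinator, which callers use to locate the
// broker that manages a consumer group's offsets. The group name is
// hypothetical:
//
//	broker, err := client.Coordinator("my-group")
//	if err != nil {
//		return err
//	}
//	// offset commit/fetch requests for "my-group" are sent to this broker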

View file

@ -1,458 +0,0 @@
package sarama
import (
"compress/gzip"
"crypto/tls"
"fmt"
"io/ioutil"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
)
const defaultClientID = "sarama"
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
// How many outstanding requests a connection is allowed to have before
// sending on it blocks (default 5).
MaxOpenRequests int
// All three of the below configurations are similar to the
// `socket.timeout.ms` setting in JVM kafka. All of them default
// to 30 seconds.
DialTimeout time.Duration // How long to wait for the initial connection.
ReadTimeout time.Duration // How long to wait for a response.
WriteTimeout time.Duration // How long to wait for a transmit.
TLS struct {
// Whether or not to use TLS when connecting to the broker
// (defaults to false).
Enable bool
// The TLS configuration to use for secure connections if
// enabled (defaults to nil).
Config *tls.Config
}
// SASL based authentication with broker. While there are multiple SASL authentication methods
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
SASL struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
// username and password for SASL/PLAIN authentication
User string
Password string
}
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
KeepAlive time.Duration
}
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
// The total number of times to retry a metadata request when the
// cluster is in the middle of a leader election (default 3).
Max int
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
}
// Producer is the namespace for configuration related to producing messages,
// used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be
// set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
// JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait for the receipt of the number of
// RequiredAcks (defaults to 10 seconds). This is only relevant when
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
// millisecond resolution, nanoseconds will be truncated. Equivalent to
// the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// The level of compression to use on messages. The meaning depends
// on the actual compression type used and defaults to default compression
// level for the codec.
CompressionLevel int
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the
// Errors channel, including the error (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and
// sent to the broker. By default, messages are sent as fast as possible, and
// all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
// The best-effort number of bytes needed to trigger a flush. Use the
// global sarama.MaxRequestSize to set a hard upper limit.
Bytes int
// The best-effort number of messages needed to trigger a flush. Use
// `MaxMessages` to set a hard upper limit.
Messages int
// The best-effort frequency of flushes. Equivalent to
// `queue.buffering.max.ms` setting of JVM producer.
Frequency time.Duration
// The maximum number of messages the producer will send in a single
// broker request. Defaults to 0 for unlimited. Similar to
// `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
//
// Note that Sarama's Consumer type does not currently support automatic
// consumer-group rebalancing and offset tracking. For Zookeeper-based
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
// library builds on Sarama to add this support. For Kafka-based tracking
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
// builds on Sarama to add this support.
Consumer struct {
Retry struct {
// How long to wait after a failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
// given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker
// will wait until at least this many are available. The default is 1,
// as 0 causes the consumer to spin when no messages are available.
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 1MB). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a
// single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure
// this is at least as large as your largest message. Defaults to 0
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
// global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
// bytes to become available before it returns fewer than that anyways. The
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message to take to
// process for the user. If writing to the Messages channel takes longer
// than this, that partition will stop fetching more messages until it
// can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
// If a message is not written to the Messages channel between two ticks
// of the expiryTicker then a timeout is detected.
// Using a ticker instead of a timer to detect timeouts should typically
// result in many fewer calls to Timer functions which may result in a
// significant performance improvement if many messages are being sent
// and timeouts are infrequent.
// The disadvantage of using a ticker instead of a timer is that
// timeouts will be less accurate. That is, the effective timeout could
// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
// between two messages being sent may not be recognized as a timeout.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
// you must read from them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on
// the Errors channel (default disabled).
Errors bool
}
// Offsets specifies configuration for how and when to commit consumed
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// How frequently to commit updated offsets. Defaults to 1s.
CommitInterval time.Duration
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
// The retention duration for committed offsets. If zero, disabled
// (in which case the `offsets.retention.minutes` option on the
// broker will be used). Kafka only supports precision up to
// milliseconds; nanoseconds will be truncated. Requires Kafka
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
}
}
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
// The version of Kafka that Sarama will assume it is running against.
// Defaults to the oldest supported stable version. Since Kafka provides
// backwards-compatibility, setting it to a version older than you have
// will not break anything, although it may prevent you from using the
// latest features. Setting it to a version greater than you are actually
// running may lead to random breakage.
Version KafkaVersion
// The registry to define metrics into.
// Defaults to a local registry.
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
// prior to starting Sarama.
// See Examples on how to use the metrics registry
MetricRegistry metrics.Registry
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Producer.CompressionLevel = CompressionLevelDefault
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 1024 * 1024
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.CommitInterval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = MinVersion
c.MetricRegistry = metrics.NewRegistry()
return c
}
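// Illustrative sketch (not part of the original file): a typical caller starts
// from NewConfig and overrides only what differs from the defaults above. All
// values below are hypothetical:
//
//	cfg := NewConfig()
//	cfg.ClientID = "my-service"            // avoid the "sarama" default
//	cfg.Version = V0_10_2_0                // opt in to newer protocol features
//	cfg.Net.TLS.Enable = true
//	cfg.Net.TLS.Config = &tls.Config{}     // supply real certificates here
//	cfg.Producer.RequiredAcks = WaitForAll
//	cfg.Producer.Return.Successes = true   // required when used by a SyncProducer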
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should be warned on but not fail completely, do those first
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
if c.Net.SASL.Password != "" {
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
}
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
case c.Net.SASL.Enable && c.Net.SASL.User == "":
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
case c.Net.SASL.Enable && c.Net.SASL.Password == "":
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
if c.Producer.Compression == CompressionGZIP {
if c.Producer.CompressionLevel != CompressionLevelDefault {
if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
}
}
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.CommitInterval <= 0:
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
case !validID.MatchString(c.ClientID):
return ConfigurationError("ClientID is invalid")
}
return nil
}
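// Illustrative sketch (not part of the original file): Validate is run by the
// constructors, but calling it up front lets a bad configuration fail fast:
//
//	if err := cfg.Validate(); err != nil {
//		log.Fatalf("invalid sarama config: %v", err) // hypothetical handling
//	}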

View file

@ -1,15 +0,0 @@
package sarama
type ConfigResourceType int8
// Taken from:
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
const (
UnknownResource ConfigResourceType = 0
AnyResource ConfigResourceType = 1
TopicResource ConfigResourceType = 2
GroupResource ConfigResourceType = 3
ClusterResource ConfigResourceType = 4
BrokerResource ConfigResourceType = 5
)

View file

@ -1,808 +0,0 @@
package sarama
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
Headers []*RecordHeader // only set if kafka is version 0.11+
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
// the given offset. It will return an error if this Consumer is already consuming
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
// or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// HighWaterMarks returns the current high water marks for each topic and partition.
// Consistency between partitions is not guaranteed since high water marks are updated separately.
HighWaterMarks() map[string]map[int32]int64
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
}
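// Illustrative sketch (not part of the original file): minimal use of the
// Consumer interface above, reading one partition from the newest offset.
// Broker address and topic are hypothetical:
//
//	consumer, err := NewConsumer([]string{"localhost:9092"}, nil) // nil config uses defaults
//	if err != nil {
//		return err
//	}
//	defer consumer.Close()
//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
//	if err != nil {
//		return err
//	}
//	defer pc.Close()
//	for msg := range pc.Messages() {
//		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
//	}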
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
c.lock.Lock()
defer c.lock.Unlock()
hwms := make(map[string]map[int32]int64)
for topic, p := range c.children {
hwm := make(map[int32]int64, len(p))
for partition, pc := range p {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
// the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if
// enabled. By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
}
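// Illustrative sketch (not part of the original file): the AsyncClose pattern
// described above, terminating a for/range consume loop from the outside via a
// hypothetical shutdown channel:
//
//	go func() {
//		<-shutdown      // some external stop signal
//		pc.AsyncClose() // tear-down will eventually close Messages/Errors
//	}()
//	for msg := range pc.Messages() {
//		process(msg) // hypothetical handler
//	}
//	// the loop exits once tear-down closes the Messages channel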
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
responseResult error
closeOnce sync.Once
fetchSize int32
offset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
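// Illustrative worked example (not part of the original file): with
// oldestOffset = 100 and newestOffset = 250, chooseStartingOffset maps
// OffsetNewest to 250, OffsetOldest to 100, and a literal 180 to 180, while
// literal offsets such as 50 or 300 fall outside [100, 250] and yield
// ErrOffsetOutOfRange.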
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, remove the child from the consumer, and then close its 'messages' and
// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, it will
// also just close itself)
child.closeOnce.Do(func() {
close(child.dying)
})
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
firstAttempt := true
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
for i, msg := range msgs {
messageSelect:
select {
case child.messages <- msg:
firstAttempt = true
case <-expiryTicker.C:
if !firstAttempt {
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
firstAttempt = false
goto messageSelect
}
}
}
child.broker.acks.Done()
}
expiryTicker.Stop()
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, msgBlock := range msgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
}
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
}
}
if len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
var messages []*ConsumerMessage
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
if offset < child.offset {
continue
}
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: rec.Key,
Value: rec.Value,
Offset: offset,
Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
Headers: rec.Headers,
})
child.offset = offset + 1
}
if len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
nRecs, err := block.numRecords()
if err != nil {
return nil, err
}
if nRecs == 0 {
partialTrailingMessage, err := block.isPartial()
if err != nil {
return nil, err
}
// We got no messages. If we got a trailing one then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if partialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
messages := []*ConsumerMessage{}
for _, records := range block.RecordsSet {
if control, err := records.isControl(); err != nil || control {
continue
}
switch records.recordsType {
case legacyRecords:
messageSetMessages, err := child.parseMessages(records.MsgSet)
if err != nil {
return nil, err
}
messages = append(messages, messageSetMessages...)
case defaultRecords:
recordBatchMessages, err := child.parseRecords(records.RecordBatch)
if err != nil {
return nil, err
}
messages = append(messages, recordBatchMessages...)
default:
return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
}
}
return messages, nil
}
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it has none.
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
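// Illustrative sketch (not part of the original file): the batching pattern
// above, reduced to its essentials: accept items whenever they arrive, and
// hand the accumulated batch (nil when empty) to a single worker between its
// requests. All names are hypothetical:
//
//	var batch []item
//	for {
//		select {
//		case it := <-input:
//			batch = append(batch, it)
//		case output <- batch: // nil signals "no new work right now"
//			batch = nil
//		}
//	}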
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
break
}
}
}
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again,
// so shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// dunno, tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscriptions := range bc.newSubscriptions {
if len(newSubscriptions) == 0 {
<-bc.wait
continue
}
for _, child := range newSubscriptions {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
request.Version = 4
request.Isolation = ReadUncommitted // We don't support transactions yet.
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}

View file

@ -1,94 +0,0 @@
package sarama
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
UserData []byte
}
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putStringArray(m.Topics); err != nil {
return err
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
if m.Topics, err = pd.getStringArray(); err != nil {
return
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32
UserData []byte
}
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
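// Illustrative sketch (not part of the original file): group leaders encode an
// assignment like the following into their sync-group payload. Topic name and
// partition IDs are hypothetical:
//
//	assignment := &ConsumerGroupMemberAssignment{
//		Version: 1,
//		Topics: map[string][]int32{
//			"my-topic": {0, 1, 2},
//		},
//	}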

View file

@ -1,33 +0,0 @@
package sarama
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
tmp := new(FindCoordinatorRequest)
tmp.CoordinatorKey = r.ConsumerGroup
tmp.CoordinatorType = CoordinatorGroup
return tmp.encode(pe)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorRequest)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.ConsumerGroup = tmp.CoordinatorKey
return nil
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}

View file

@ -1,77 +0,0 @@
package sarama
import (
"net"
"strconv"
)
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp := new(FindCoordinatorResponse)
if err := tmp.decode(pd, version); err != nil {
return err
}
r.Err = tmp.Err
r.Coordinator = tmp.Coordinator
if tmp.Coordinator == nil {
return nil
}
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
if r.Coordinator == nil {
r.Coordinator = new(Broker)
r.Coordinator.id = r.CoordinatorID
r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
}
tmp := &FindCoordinatorResponse{
Version: 0,
Err: r.Err,
Coordinator: r.Coordinator,
}
if err := tmp.encode(pe); err != nil {
return err
}
return nil
}
func (r *ConsumerMetadataResponse) key() int16 {
return 10
}
func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}

View file

@ -1,69 +0,0 @@
package sarama
import (
"encoding/binary"
"fmt"
"hash/crc32"
)
type crcPolynomial int8
const (
crcIEEE crcPolynomial = iota
crcCastagnoli
)
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
polynomial crcPolynomial
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func newCRC32Field(polynomial crcPolynomial) *crc32Field {
return &crc32Field{polynomial: polynomial}
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc, err := c.crc(curOffset, buf)
if err != nil {
return err
}
expected := binary.BigEndian.Uint32(buf[c.startOffset:])
if crc != expected {
return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
}
return nil
}
func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
var tab *crc32.Table
switch c.polynomial {
case crcIEEE:
tab = crc32.IEEETable
case crcCastagnoli:
tab = castagnoliTable
default:
return 0, PacketDecodingError{"invalid CRC type"}
}
return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
}
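// Illustrative sketch (not part of the original file): the same two checksum
// flavours computed directly with the standard library, over an arbitrary
// payload:
//
//	buf := []byte("payload")
//	ieee := crc32.ChecksumIEEE(buf)
//	cast := crc32.Checksum(buf, crc32.MakeTable(crc32.Castagnoli))
//	fmt.Printf("IEEE=%#x Castagnoli=%#x\n", ieee, cast)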

View file

@ -1,121 +0,0 @@
package sarama
import "time"
type CreatePartitionsRequest struct {
TopicPartitions map[string]*TopicPartition
Timeout time.Duration
ValidateOnly bool
}
func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
return err
}
for topic, partition := range c.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := partition.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
pe.putBool(c.ValidateOnly)
return nil
}
func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitions = make(map[string]*TopicPartition, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitions[topic] = new(TopicPartition)
if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if c.ValidateOnly, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (r *CreatePartitionsRequest) key() int16 {
return 37
}
func (r *CreatePartitionsRequest) version() int16 {
return 0
}
func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartition struct {
Count int32
Assignment [][]int32
}
func (t *TopicPartition) encode(pe packetEncoder) error {
pe.putInt32(t.Count)
if len(t.Assignment) == 0 {
pe.putInt32(-1)
return nil
}
if err := pe.putArrayLength(len(t.Assignment)); err != nil {
return err
}
for _, assign := range t.Assignment {
if err := pe.putInt32Array(assign); err != nil {
return err
}
}
return nil
}
func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
if t.Count, err = pd.getInt32(); err != nil {
return err
}
n, err := pd.getInt32()
if err != nil {
return err
}
if n <= 0 {
return nil
}
t.Assignment = make([][]int32, n)
for i := 0; i < int(n); i++ {
if t.Assignment[i], err = pd.getInt32Array(); err != nil {
return err
}
}
return nil
}
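// Illustrative sketch (not part of the original file): growing a hypothetical
// topic to 12 partitions with the types above, leaving replica placement to
// the broker by omitting Assignment:
//
//	req := &CreatePartitionsRequest{
//		TopicPartitions: map[string]*TopicPartition{
//			"my-topic": {Count: 12},
//		},
//		Timeout: 30 * time.Second,
//	}
//	// the request is then sent over an established broker connection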

View file

@ -1,94 +0,0 @@
package sarama
import "time"
type CreatePartitionsResponse struct {
ThrottleTime time.Duration
TopicPartitionErrors map[string]*TopicPartitionError
}
func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
return err
}
for topic, partitionError := range c.TopicPartitionErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := partitionError.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicPartitionErrors[topic] = new(TopicPartitionError)
if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (r *CreatePartitionsResponse) key() int16 {
return 37
}
func (r *CreatePartitionsResponse) version() int16 {
return 0
}
func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
return V1_0_0_0
}
type TopicPartitionError struct {
Err KError
ErrMsg *string
}
func (t *TopicPartitionError) encode(pe packetEncoder) error {
pe.putInt16(int16(t.Err))
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
return nil
}
func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kerr)
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

@ -1,174 +0,0 @@
package sarama
import (
"time"
)
type CreateTopicsRequest struct {
Version int16
TopicDetails map[string]*TopicDetail
Timeout time.Duration
ValidateOnly bool
}
func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
return err
}
for topic, detail := range c.TopicDetails {
if err := pe.putString(topic); err != nil {
return err
}
if err := detail.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
if c.Version >= 1 {
pe.putBool(c.ValidateOnly)
}
return nil
}
func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicDetails = make(map[string]*TopicDetail, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicDetails[topic] = new(TopicDetail)
if err = c.TopicDetails[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if version >= 1 {
c.ValidateOnly, err = pd.getBool()
if err != nil {
return err
}
c.Version = version
}
return nil
}
func (c *CreateTopicsRequest) key() int16 {
return 19
}
func (c *CreateTopicsRequest) version() int16 {
return c.Version
}
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicDetail struct {
NumPartitions int32
ReplicationFactor int16
ReplicaAssignment map[int32][]int32
ConfigEntries map[string]*string
}
func (t *TopicDetail) encode(pe packetEncoder) error {
pe.putInt32(t.NumPartitions)
pe.putInt16(t.ReplicationFactor)
if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
return err
}
for partition, assignment := range t.ReplicaAssignment {
pe.putInt32(partition)
if err := pe.putInt32Array(assignment); err != nil {
return err
}
}
if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range t.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
if t.NumPartitions, err = pd.getInt32(); err != nil {
return err
}
if t.ReplicationFactor, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ReplicaAssignment = make(map[int32][]int32, n)
for i := 0; i < n; i++ {
replica, err := pd.getInt32()
if err != nil {
return err
}
if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
return err
}
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}
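A hypothetical construction of the request above (topic name and settings invented; the strPtr helper is local to the example):
package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func strPtr(s string) *string { return &s }

func main() {
	req := &sarama.CreateTopicsRequest{
		Version: 1, // v1 adds ValidateOnly and maps to V0_11_0_0, per requiredVersion above
		TopicDetails: map[string]*sarama.TopicDetail{
			"audit-log": {
				NumPartitions:     3,
				ReplicationFactor: 2,
				ConfigEntries:     map[string]*string{"retention.ms": strPtr("86400000")},
			},
		},
		Timeout:      15 * time.Second,
		ValidateOnly: true,
	}
	_ = req
}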

@ -1,112 +0,0 @@
package sarama
import "time"
type CreateTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrors map[string]*TopicError
}
func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
if c.Version >= 2 {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
return err
}
for topic, topicError := range c.TopicErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := topicError.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
if version >= 2 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicErrors = make(map[string]*TopicError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicErrors[topic] = new(TopicError)
if err := c.TopicErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) key() int16 {
return 19
}
func (c *CreateTopicsResponse) version() int16 {
return c.Version
}
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicError struct {
Err KError
ErrMsg *string
}
func (t *TopicError) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(t.Err))
if version >= 1 {
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
}
return nil
}
func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
if version >= 1 {
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
return nil
}

@ -1,30 +0,0 @@
package sarama
type DeleteGroupsRequest struct {
Groups []string
}
func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DeleteGroupsRequest) key() int16 {
return 42
}
func (r *DeleteGroupsRequest) version() int16 {
return 0
}
func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
return V1_1_0_0
}
func (r *DeleteGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}

@ -1,70 +0,0 @@
package sarama
import (
"time"
)
type DeleteGroupsResponse struct {
ThrottleTime time.Duration
GroupErrorCodes map[string]KError
}
func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
return err
}
for groupID, errorCode := range r.GroupErrorCodes {
if err := pe.putString(groupID); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
groupID, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
r.GroupErrorCodes[groupID] = KError(errorCode)
}
return nil
}
func (r *DeleteGroupsResponse) key() int16 {
return 42
}
func (r *DeleteGroupsResponse) version() int16 {
return 0
}
func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
return V1_1_0_0
}

@ -1,126 +0,0 @@
package sarama
import (
"sort"
"time"
)
// request message format is:
// [topic] timeout(int32)
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) offset(int64)
type DeleteRecordsRequest struct {
Topics map[string]*DeleteRecordsRequestTopic
Timeout time.Duration
}
func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsRequestTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (d *DeleteRecordsRequest) key() int16 {
return 21
}
func (d *DeleteRecordsRequest) version() int16 {
return 0
}
func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsRequestTopic struct {
PartitionOffsets map[int32]int64 // partition => offset
}
func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
return err
}
keys := make([]int32, 0, len(t.PartitionOffsets))
for partition := range t.PartitionOffsets {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
pe.putInt64(t.PartitionOffsets[partition])
}
return nil
}
func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.PartitionOffsets = make(map[int32]int64, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
offset, err := pd.getInt64()
if err != nil {
return err
}
t.PartitionOffsets[partition] = offset
}
}
return nil
}
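Following the wire format sketched in the comment at the top of this file, a hypothetical request deleting all records below the given offsets:
package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.DeleteRecordsRequest{
		Topics: map[string]*sarama.DeleteRecordsRequestTopic{
			"events": {PartitionOffsets: map[int32]int64{0: 1000, 1: 2500}}, // partition => offset
		},
		Timeout: 10 * time.Second,
	}
	_ = req
}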

@ -1,158 +0,0 @@
package sarama
import (
"sort"
"time"
)
// response message format is:
// throttleMs(int32) [topic]
// where topic is:
// name(string) [partition]
// where partition is:
// id(int32) low_watermark(int64) error_code(int16)
type DeleteRecordsResponse struct {
Version int16
ThrottleTime time.Duration
Topics map[string]*DeleteRecordsResponseTopic
}
func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(d.Topics)); err != nil {
return err
}
keys := make([]string, 0, len(d.Topics))
for topic := range d.Topics {
keys = append(keys, topic)
}
sort.Strings(keys)
for _, topic := range keys {
if err := pe.putString(topic); err != nil {
return err
}
if err := d.Topics[topic].encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
d.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
details := new(DeleteRecordsResponseTopic)
if err = details.decode(pd, version); err != nil {
return err
}
d.Topics[topic] = details
}
}
return nil
}
func (d *DeleteRecordsResponse) key() int16 {
return 21
}
func (d *DeleteRecordsResponse) version() int16 {
return 0
}
func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type DeleteRecordsResponseTopic struct {
Partitions map[int32]*DeleteRecordsResponsePartition
}
func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(t.Partitions)); err != nil {
return err
}
keys := make([]int32, 0, len(t.Partitions))
for partition := range t.Partitions {
keys = append(keys, partition)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
for _, partition := range keys {
pe.putInt32(partition)
if err := t.Partitions[partition].encode(pe); err != nil {
return err
}
}
return nil
}
func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
for i := 0; i < n; i++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
details := new(DeleteRecordsResponsePartition)
if err = details.decode(pd, version); err != nil {
return err
}
t.Partitions[partition] = details
}
}
return nil
}
type DeleteRecordsResponsePartition struct {
LowWatermark int64
Err KError
}
func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
pe.putInt64(t.LowWatermark)
pe.putInt16(int16(t.Err))
return nil
}
func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
lowWatermark, err := pd.getInt64()
if err != nil {
return err
}
t.LowWatermark = lowWatermark
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
return nil
}

@ -1,48 +0,0 @@
package sarama
import "time"
type DeleteTopicsRequest struct {
Version int16
Topics []string
Timeout time.Duration
}
func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putStringArray(d.Topics); err != nil {
return err
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
if d.Topics, err = pd.getStringArray(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
d.Version = version
return nil
}
func (d *DeleteTopicsRequest) key() int16 {
return 20
}
func (d *DeleteTopicsRequest) version() int16 {
return d.Version
}
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

@ -1,78 +0,0 @@
package sarama
import "time"
type DeleteTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrorCodes map[string]KError
}
func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
if d.Version >= 1 {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
return err
}
for topic, errorCode := range d.TopicErrorCodes {
if err := pe.putString(topic); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
d.Version = version
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.TopicErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
d.TopicErrorCodes[topic] = KError(errorCode)
}
return nil
}
func (d *DeleteTopicsResponse) key() int16 {
return 20
}
func (d *DeleteTopicsResponse) version() int16 {
return d.Version
}
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

@ -1,91 +0,0 @@
package sarama
type ConfigResource struct {
Type ConfigResourceType
Name string
ConfigNames []string
}
type DescribeConfigsRequest struct {
Resources []*ConfigResource
}
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
pe.putInt8(int8(c.Type))
if err := pe.putString(c.Name); err != nil {
return err
}
if len(c.ConfigNames) == 0 {
pe.putInt32(-1)
continue
}
if err := pe.putStringArray(c.ConfigNames); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ConfigResource, n)
for i := 0; i < n; i++ {
r.Resources[i] = &ConfigResource{}
t, err := pd.getInt8()
if err != nil {
return err
}
r.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Resources[i].Name = name
confLength, err := pd.getArrayLength()
if err != nil {
return err
}
if confLength == -1 {
continue
}
cfnames := make([]string, confLength)
for i := 0; i < confLength; i++ {
s, err := pd.getString()
if err != nil {
return err
}
cfnames[i] = s
}
r.Resources[i].ConfigNames = cfnames
}
return nil
}
func (r *DescribeConfigsRequest) key() int16 {
return 32
}
func (r *DescribeConfigsRequest) version() int16 {
return 0
}
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
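A hypothetical caller-side construction (TopicResource is assumed to be the ConfigResourceType constant for topics, defined elsewhere in the package):
package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.DescribeConfigsRequest{
		Resources: []*sarama.ConfigResource{{
			Type:        sarama.TopicResource,
			Name:        "events",                 // hypothetical topic
			ConfigNames: []string{"retention.ms"}, // leave empty to request all configs (encoded as -1)
		}},
	}
	_ = req
}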

@ -1,188 +0,0 @@
package sarama
import "time"
type DescribeConfigsResponse struct {
ThrottleTime time.Duration
Resources []*ResourceResponse
}
type ResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
Configs []*ConfigEntry
}
type ConfigEntry struct {
Name string
Value string
ReadOnly bool
Default bool
Sensitive bool
}
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err = pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ResourceResponse, n)
for i := 0; i < n; i++ {
rr := &ResourceResponse{}
if err := rr.decode(pd, version); err != nil {
return err
}
r.Resources[i] = rr
}
return nil
}
func (r *DescribeConfigsResponse) key() int16 {
return 32
}
func (r *DescribeConfigsResponse) version() int16 {
return 0
}
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
pe.putInt16(r.ErrorCode)
if err = pe.putString(r.ErrorMsg); err != nil {
return err
}
pe.putInt8(int8(r.Type))
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putArrayLength(len(r.Configs)); err != nil {
return err
}
for _, c := range r.Configs {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
ec, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = ec
em, err := pd.getString()
if err != nil {
return err
}
r.ErrorMsg = em
t, err := pd.getInt8()
if err != nil {
return err
}
r.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Configs = make([]*ConfigEntry, n)
for i := 0; i < n; i++ {
c := &ConfigEntry{}
if err := c.decode(pd, version); err != nil {
return err
}
r.Configs[i] = c
}
return nil
}
func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putString(r.Value); err != nil {
return err
}
pe.putBool(r.ReadOnly)
pe.putBool(r.Default)
pe.putBool(r.Sensitive)
return nil
}
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
value, err := pd.getString()
if err != nil {
return err
}
r.Value = value
read, err := pd.getBool()
if err != nil {
return err
}
r.ReadOnly = read
de, err := pd.getBool()
if err != nil {
return err
}
r.Default = de
sensitive, err := pd.getBool()
if err != nil {
return err
}
r.Sensitive = sensitive
return nil
}

@ -1,30 +0,0 @@
package sarama
type DescribeGroupsRequest struct {
Groups []string
}
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DescribeGroupsRequest) key() int16 {
return 15
}
func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *DescribeGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}

@ -1,187 +0,0 @@
package sarama
type DescribeGroupsResponse struct {
Groups []*GroupDescription
}
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for _, groupDescription := range r.Groups {
if err := groupDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Groups = make([]*GroupDescription, n)
for i := 0; i < n; i++ {
r.Groups[i] = new(GroupDescription)
if err := r.Groups[i].decode(pd); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) key() int16 {
return 15
}
func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
type GroupDescription struct {
Err KError
GroupId string
State string
ProtocolType string
Protocol string
Members map[string]*GroupMemberDescription
}
func (gd *GroupDescription) encode(pe packetEncoder) error {
pe.putInt16(int16(gd.Err))
if err := pe.putString(gd.GroupId); err != nil {
return err
}
if err := pe.putString(gd.State); err != nil {
return err
}
if err := pe.putString(gd.ProtocolType); err != nil {
return err
}
if err := pe.putString(gd.Protocol); err != nil {
return err
}
if err := pe.putArrayLength(len(gd.Members)); err != nil {
return err
}
for memberId, groupMemberDescription := range gd.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := groupMemberDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil {
return
}
if gd.State, err = pd.getString(); err != nil {
return
}
if gd.ProtocolType, err = pd.getString(); err != nil {
return
}
if gd.Protocol, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
gd.Members = make(map[string]*GroupMemberDescription)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
gd.Members[memberId] = new(GroupMemberDescription)
if err := gd.Members[memberId].decode(pd); err != nil {
return err
}
}
return nil
}
type GroupMemberDescription struct {
ClientId string
ClientHost string
MemberMetadata []byte
MemberAssignment []byte
}
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
if err := pe.putString(gmd.ClientId); err != nil {
return err
}
if err := pe.putString(gmd.ClientHost); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
return err
}
return nil
}
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
if gmd.ClientId, err = pd.getString(); err != nil {
return
}
if gmd.ClientHost, err = pd.getString(); err != nil {
return
}
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
return
}
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
return
}
return nil
}
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(gmd.MemberAssignment, assignment)
return assignment, err
}
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
metadata := new(ConsumerGroupMemberMetadata)
err := decode(gmd.MemberMetadata, metadata)
return metadata, err
}
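A sketch of how the two helpers above might be used when inspecting a DescribeGroups response (group is assumed to be one of the decoded GroupDescription values):
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func printAssignments(group *sarama.GroupDescription) {
	for memberID, m := range group.Members {
		assignment, err := m.GetMemberAssignment()
		if err != nil {
			continue // not every member carries a consumer-protocol assignment
		}
		fmt.Println(memberID, assignment.Topics) // topic => assigned partitions
	}
}

func main() {}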

@ -1,89 +0,0 @@
package sarama
import (
"fmt"
"github.com/rcrowley/go-metrics"
)
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
realEnc.registry = metricRegistry
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
type versionedDecoder interface {
decode(pd packetDecoder, version int16) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper, version)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
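Because encode and versionedDecode are unexported, a round trip can only be written inside the package; a sketch using the HeartbeatRequest defined later in this change (a nil metrics registry is assumed safe here, since this request records no metrics):
// a sketch as if it lived inside package sarama
func heartbeatRoundTrip() error {
	req := &HeartbeatRequest{GroupId: "cg", GenerationId: 1, MemberId: "m1"}
	buf, err := encode(req, nil) // nil registry: HeartbeatRequest records no metrics
	if err != nil {
		return err
	}
	decoded := new(HeartbeatRequest)
	return versionedDecode(buf, decoded, req.version())
}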

@ -1,50 +0,0 @@
package sarama
type EndTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TransactionResult bool
}
func (a *EndTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
pe.putBool(a.TransactionResult)
return nil
}
func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.TransactionResult, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (a *EndTxnRequest) key() int16 {
return 26
}
func (a *EndTxnRequest) version() int16 {
return 0
}
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@ -1,44 +0,0 @@
package sarama
import (
"time"
)
type EndTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (e *EndTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
pe.putInt16(int16(e.Err))
return nil
}
func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
e.Err = KError(kerr)
return nil
}
func (e *EndTxnResponse) key() int16 {
return 26 // EndTxn API key; must match EndTxnRequest.key above
}
func (e *EndTxnResponse) version() int16 {
return 0
}
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

@ -1,281 +0,0 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
// a RecordBatch.
var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
// ErrControllerNotAvailable is returned when the server did not return a valid controller id. This may happen when
// the Kafka server's version is lower than 0.10.0.0.
var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found to update
// the metadata.
var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
ErrOutOfOrderSequenceNumber KError = 45
ErrDuplicateSequenceNumber KError = 46
ErrInvalidProducerEpoch KError = 47
ErrInvalidTxnState KError = 48
ErrInvalidProducerIDMapping KError = 49
ErrInvalidTransactionTimeout KError = 50
ErrConcurrentTransactions KError = 51
ErrTransactionCoordinatorFenced KError = 52
ErrTransactionalIDAuthorizationFailed KError = 53
ErrSecurityDisabled KError = 54
ErrOperationNotAttempted KError = 55
ErrKafkaStorageError KError = 56
ErrLogDirNotFound KError = 57
ErrSASLAuthenticationFailed KError = 58
ErrUnknownProducerID KError = 59
ErrReassignmentInProgress KError = 60
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica information not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrNetworkException:
return "kafka server: The server disconnected before a response was received."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
case ErrInvalidRequiredAcks:
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
case ErrIllegalGeneration:
return "kafka server: The provided generation id is not the current generation."
case ErrInconsistentGroupProtocol:
return "kafka server: The provider group protocol type is incompatible with the other members."
case ErrInvalidGroupId:
return "kafka server: The provided group id was empty."
case ErrUnknownMemberId:
return "kafka server: The provided member is not known in the current generation."
case ErrInvalidSessionTimeout:
return "kafka server: The provided session timeout is outside the allowed range."
case ErrRebalanceInProgress:
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
case ErrInvalidCommitOffsetSize:
return "kafka server: The provided commit metadata was too large."
case ErrTopicAuthorizationFailed:
return "kafka server: The client is not authorized to access this topic."
case ErrGroupAuthorizationFailed:
return "kafka server: The client is not authorized to access this group."
case ErrClusterAuthorizationFailed:
return "kafka server: The client is not authorized to send this request type."
case ErrInvalidTimestamp:
return "kafka server: The timestamp of the message is out of acceptable range."
case ErrUnsupportedSASLMechanism:
return "kafka server: The broker does not support the requested SASL mechanism."
case ErrIllegalSASLState:
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
case ErrOutOfOrderSequenceNumber:
return "kafka server: The broker received an out of order sequence number."
case ErrDuplicateSequenceNumber:
return "kafka server: The broker received a duplicate sequence number."
case ErrInvalidProducerEpoch:
return "kafka server: Producer attempted an operation with an old epoch."
case ErrInvalidTxnState:
return "kafka server: The producer attempted a transactional operation in an invalid state."
case ErrInvalidProducerIDMapping:
return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
case ErrInvalidTransactionTimeout:
return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
case ErrConcurrentTransactions:
return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
case ErrTransactionCoordinatorFenced:
return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
case ErrTransactionalIDAuthorizationFailed:
return "kafka server: Transactional ID authorization failed."
case ErrSecurityDisabled:
return "kafka server: Security features are disabled."
case ErrOperationNotAttempted:
return "kafka server: The broker did not attempt to execute this operation."
case ErrKafkaStorageError:
return "kafka server: Disk error when trying to access log file on the disk."
case ErrLogDirNotFound:
return "kafka server: The specified log directory is not found in the broker config."
case ErrSASLAuthenticationFailed:
return "kafka server: SASL Authentication failed."
case ErrUnknownProducerID:
return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
case ErrReassignmentInProgress:
return "kafka server: A partition reassignment is in progress."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
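A sketch of caller-side branching on these codes (the retriable set chosen here is illustrative, not exhaustive):
package main

import "github.com/Shopify/sarama"

func isRetriable(err error) bool {
	kerr, ok := err.(sarama.KError)
	if !ok {
		return false
	}
	switch kerr {
	case sarama.ErrLeaderNotAvailable, sarama.ErrNotLeaderForPartition, sarama.ErrRequestTimedOut:
		return true
	}
	return false
}

func main() {}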

@ -1,170 +0,0 @@
package sarama
type fetchRequestBlock struct {
fetchOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(b.fetchOffset)
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
Isolation IsolationLevel
blocks map[string]map[int32]*fetchRequestBlock
}
type IsolationLevel int8
const (
ReadUncommitted IsolationLevel = 0
ReadCommitted IsolationLevel = 1
)
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version >= 3 {
pe.putInt32(r.MaxBytes)
}
if r.Version >= 4 {
pe.putInt8(int8(r.Isolation))
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, blocks := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version >= 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
if r.Version >= 4 {
isolation, err := pd.getInt8()
if err != nil {
return err
}
r.Isolation = IsolationLevel(isolation)
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
return nil
}
func (r *FetchRequest) key() int16 {
return 1
}
func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
r.blocks[topic][partitionID] = tmp
}
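A hypothetical fetch built with the helper above, exercising the v3 response-size cap discussed in the comment on FetchRequest:
package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.FetchRequest{
		Version:     3,       // v3 adds the response-wide MaxBytes cap (KIP-74)
		MaxWaitTime: 500,     // milliseconds
		MinBytes:    1,
		MaxBytes:    1 << 20, // overall response cap
	}
	req.AddBlock("events", 0, 1234, 32768) // topic, partition, fetchOffset, per-partition maxBytes
	_ = req
}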

@ -1,385 +0,0 @@
package sarama
import (
"time"
)
type AbortedTransaction struct {
ProducerID int64
FirstOffset int64
}
func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.FirstOffset, err = pd.getInt64(); err != nil {
return err
}
return nil
}
func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
pe.putInt64(t.ProducerID)
pe.putInt64(t.FirstOffset)
return nil
}
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
LastStableOffset int64
AbortedTransactions []*AbortedTransaction
Records *Records // deprecated: use FetchResponseBlock.RecordsSet
RecordsSet []*Records
Partial bool
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 4 {
b.LastStableOffset, err = pd.getInt64()
if err != nil {
return err
}
numTransact, err := pd.getArrayLength()
if err != nil {
return err
}
if numTransact >= 0 {
b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
}
for i := 0; i < numTransact; i++ {
transact := new(AbortedTransaction)
if err = transact.decode(pd); err != nil {
return err
}
b.AbortedTransactions[i] = transact
}
}
recordsSize, err := pd.getInt32()
if err != nil {
return err
}
recordsDecoder, err := pd.getSubset(int(recordsSize))
if err != nil {
return err
}
b.RecordsSet = []*Records{}
for recordsDecoder.remaining() > 0 {
records := &Records{}
if err := records.decode(recordsDecoder); err != nil {
// If we have decoded at least one batch of records already, truncated data is expected and not an error
if err == ErrInsufficientData {
if len(b.RecordsSet) == 0 {
b.Partial = true
}
break
}
return err
}
partial, err := records.isPartial()
if err != nil {
return err
}
// If we already have at least one full batch of records, skip an incomplete trailing one
if partial && len(b.RecordsSet) > 0 {
break
}
b.RecordsSet = append(b.RecordsSet, records)
if b.Records == nil {
b.Records = records
}
}
return nil
}
func (b *FetchResponseBlock) numRecords() (int, error) {
sum := 0
for _, records := range b.RecordsSet {
count, err := records.numRecords()
if err != nil {
return 0, err
}
sum += count
}
return sum, nil
}
func (b *FetchResponseBlock) isPartial() (bool, error) {
if b.Partial {
return true, nil
}
if len(b.RecordsSet) == 1 {
return b.RecordsSet[0].isPartial()
}
return false, nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
if version >= 4 {
pe.putInt64(b.LastStableOffset)
if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
return err
}
for _, transact := range b.AbortedTransactions {
if err = transact.encode(pe); err != nil {
return err
}
}
}
pe.push(&lengthField{})
for _, records := range b.RecordsSet {
err = records.encode(pe)
if err != nil {
return err
}
}
return pe.pop()
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
Version int16 // v1 requires 0.9+, v2 requires 0.10+
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
throttle, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe, r.Version)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchResponse) key() int16 {
return 1
}
func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
case 4:
return V0_11_0_0
default:
return MinVersion
}
}
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
return frb
}
func encodeKV(key, value Encoder) ([]byte, []byte) {
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
return kb, vb
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
if len(frb.RecordsSet) == 0 {
records := newLegacyRecords(&MessageSet{})
frb.RecordsSet = []*Records{&records}
}
set := frb.RecordsSet[0].MsgSet
set.Messages = append(set.Messages, msgBlock)
}
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.addRecord(rec)
}
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
frb := r.getOrCreateBlock(topic, partition)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].RecordBatch
batch.LastOffsetDelta = offset
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset
}
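The Add*/Set* helpers above read like test fixtures; a hypothetical use (StringEncoder is sarama's trivial string Encoder, assumed available):
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := new(sarama.FetchResponse)
	resp.AddMessage("events", 0, nil, sarama.StringEncoder("hello"), 42) // nil key
	resp.AddError("events", 1, sarama.ErrOffsetOutOfRange)
	if block := resp.GetBlock("events", 0); block != nil {
		fmt.Println(block.HighWaterMarkOffset)
	}
}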

@ -1,61 +0,0 @@
package sarama
type CoordinatorType int8
const (
CoordinatorGroup CoordinatorType = 0
CoordinatorTransaction CoordinatorType = 1
)
type FindCoordinatorRequest struct {
Version int16
CoordinatorKey string
CoordinatorType CoordinatorType
}
func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
if err := pe.putString(f.CoordinatorKey); err != nil {
return err
}
if f.Version >= 1 {
pe.putInt8(int8(f.CoordinatorType))
}
return nil
}
func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
if f.CoordinatorKey, err = pd.getString(); err != nil {
return err
}
if version >= 1 {
f.Version = version
coordinatorType, err := pd.getInt8()
if err != nil {
return err
}
f.CoordinatorType = CoordinatorType(coordinatorType)
}
return nil
}
func (f *FindCoordinatorRequest) key() int16 {
return 10
}
func (f *FindCoordinatorRequest) version() int16 {
return f.Version
}
func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}

@ -1,92 +0,0 @@
package sarama
import (
"time"
)
var NoNode = &Broker{id: -1, addr: ":-1"}
type FindCoordinatorResponse struct {
Version int16
ThrottleTime time.Duration
Err KError
ErrMsg *string
Coordinator *Broker
}
func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
f.Version = version
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(tmp)
if version >= 1 {
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
coordinator := new(Broker)
// The broker decode version is hardcoded to 0, as version 1 of Broker.decode
// adds the rack field, which is not present in the FindCoordinatorResponse.
if err := coordinator.decode(pd, 0); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
f.Coordinator = coordinator
return nil
}
func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
if f.Version >= 1 {
pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
}
pe.putInt16(int16(f.Err))
if f.Version >= 1 {
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
}
coordinator := f.Coordinator
if coordinator == nil {
coordinator = NoNode
}
if err := coordinator.encode(pe, 0); err != nil {
return err
}
return nil
}
func (f *FindCoordinatorResponse) key() int16 {
return 10
}
func (f *FindCoordinatorResponse) version() int16 {
return f.Version
}
func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
switch f.Version {
case 1:
return V0_11_0_0
default:
return V0_8_2_0
}
}

@ -1,47 +0,0 @@
package sarama
type HeartbeatRequest struct {
GroupId string
GenerationId int32
MemberId string
}
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *HeartbeatRequest) key() int16 {
return 12
}
func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

@ -1,32 +0,0 @@
package sarama
type HeartbeatResponse struct {
Err KError
}
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *HeartbeatResponse) key() int16 {
return 12
}
func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
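
A companion sketch for the heartbeat pair, assuming sarama's *Broker.Heartbeat method; the group ID is hypothetical, and the generation and member IDs would come from a prior JoinGroup round trip:

// Editor's sketch, not part of the diff.
func sendHeartbeat(coordinator *Broker, generationID int32, memberID string) error {
	resp, err := coordinator.Heartbeat(&HeartbeatRequest{
		GroupId:      "my-group", // hypothetical group
		GenerationId: generationID,
		MemberId:     memberID,
	})
	if err != nil {
		return err
	}
	if resp.Err != ErrNoError {
		return resp.Err // e.g. ErrRebalanceInProgress means the member must rejoin
	}
	return nil
}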

vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -1,43 +0,0 @@
package sarama
import "time"
type InitProducerIDRequest struct {
TransactionalID *string
TransactionTimeout time.Duration
}
func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
if err := pe.putNullableString(i.TransactionalID); err != nil {
return err
}
pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
return nil
}
func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
if i.TransactionalID, err = pd.getNullableString(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (i *InitProducerIDRequest) key() int16 {
return 22
}
func (i *InitProducerIDRequest) version() int16 {
return 0
}
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -1,55 +0,0 @@
package sarama
import "time"
type InitProducerIDResponse struct {
ThrottleTime time.Duration
Err KError
ProducerID int64
ProducerEpoch int16
}
func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
pe.putInt16(int16(i.Err))
pe.putInt64(i.ProducerID)
pe.putInt16(i.ProducerEpoch)
return nil
}
func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
i.Err = KError(kerr)
if i.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if i.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
return nil
}
func (i *InitProducerIDResponse) key() int16 {
return 22
}
func (i *InitProducerIDResponse) version() int16 {
return 0
}
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
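
One subtlety in the two InitProducerID types above: the timeout travels as int32 milliseconds, so finer precision is silently truncated on encode. A round-trip sketch, assuming the package-level encode helper used elsewhere in this diff and a versionedDecode counterpart from sarama's internals:

// Editor's sketch, not part of the diff.
func initProducerIDRoundTrip() (*InitProducerIDRequest, error) {
	req := &InitProducerIDRequest{
		TransactionalID:    nil, // nil requests a producer ID without a transactional ID
		TransactionTimeout: 1500*time.Millisecond + 300*time.Microsecond,
	}
	raw, err := encode(req, nil) // same helper JoinGroupRequest uses below
	if err != nil {
		return nil, err
	}
	out := &InitProducerIDRequest{}
	if err := versionedDecode(raw, out, 0); err != nil { // assumed sarama-internal helper
		return nil, err
	}
	// out.TransactionTimeout is exactly 1500ms: the extra 300µs were dropped on encode.
	return out, nil
}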

vendor/github.com/Shopify/sarama/join_group_request.go
@@ -1,163 +0,0 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
Version int16
GroupId string
SessionTimeout int32
RebalanceTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.SessionTimeout)
if r.Version >= 1 {
pe.putInt32(r.RebalanceTimeout)
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putString(r.ProtocolType); err != nil {
return err
}
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.SessionTimeout, err = pd.getInt32(); err != nil {
return
}
if version >= 1 {
if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
return err
}
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
if r.ProtocolType, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
}
func (r *JoinGroupRequest) key() int16 {
return 11
}
func (r *JoinGroupRequest) version() int16 {
return r.Version
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
bin, err := encode(metadata, nil)
if err != nil {
return err
}
r.AddGroupProtocol(name, bin)
return nil
}
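
The AddGroupProtocol helpers above exist because the deprecated GroupProtocols map cannot preserve protocol preference order. A sketch of building a join request the ordered way; the group and topic names are hypothetical:

// Editor's sketch, not part of the diff.
func newJoinRequest(memberID string) (*JoinGroupRequest, error) {
	req := &JoinGroupRequest{
		Version:          1,          // adds RebalanceTimeout; needs Kafka >= 0.10.1.0
		GroupId:          "my-group", // hypothetical
		SessionTimeout:   30000,      // milliseconds on the wire
		RebalanceTimeout: 60000,
		MemberId:         memberID, // empty on first join; the broker assigns one
		ProtocolType:     "consumer",
	}
	err := req.AddGroupProtocolMetadata("range", &ConsumerGroupMemberMetadata{
		Version: 0,
		Topics:  []string{"my-topic"}, // hypothetical subscription
	})
	return req, err
}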

vendor/github.com/Shopify/sarama/join_group_response.go
@@ -1,135 +0,0 @@
package sarama
type JoinGroupResponse struct {
Version int16
ThrottleTime int32
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
for id, bin := range r.Members {
meta := new(ConsumerGroupMemberMetadata)
if err := decode(bin, meta); err != nil {
return nil, err
}
members[id] = *meta
}
return members, nil
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
if r.Version >= 2 {
pe.putInt32(r.ThrottleTime)
}
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
if err := pe.putString(r.GroupProtocol); err != nil {
return err
}
if err := pe.putString(r.LeaderId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Members)); err != nil {
return err
}
for memberId, memberMetadata := range r.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberMetadata); err != nil {
return err
}
}
return nil
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if version >= 2 {
if r.ThrottleTime, err = pd.getInt32(); err != nil {
return
}
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.GroupProtocol, err = pd.getString(); err != nil {
return
}
if r.LeaderId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Members = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberMetadata, err := pd.getBytes()
if err != nil {
return err
}
r.Members[memberId] = memberMetadata
}
return nil
}
func (r *JoinGroupResponse) key() int16 {
return 11
}
func (r *JoinGroupResponse) version() int16 {
return r.Version
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 2:
return V0_11_0_0
case 1:
return V0_10_1_0
default:
return V0_9_0_0
}
}
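
On the consuming side of GetMembers above, the elected leader decodes every member's subscription before computing assignments. A sketch using only the types shown in this diff:

// Editor's sketch, not part of the diff.
func leaderSubscriptions(resp *JoinGroupResponse) (map[string][]string, error) {
	members, err := resp.GetMembers()
	if err != nil {
		return nil, err
	}
	topicsByMember := make(map[string][]string, len(members))
	for id, meta := range members {
		topicsByMember[id] = meta.Topics // what each member subscribed to
	}
	return topicsByMember, nil
}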

vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -1,40 +0,0 @@
package sarama
type LeaveGroupRequest struct {
GroupId string
MemberId string
}
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *LeaveGroupRequest) key() int16 {
return 13
}
func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -1,32 +0,0 @@
package sarama
type LeaveGroupResponse struct {
Err KError
}
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *LeaveGroupResponse) key() int16 {
return 13
}
func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

vendor/github.com/Shopify/sarama/length_field.go
@@ -1,69 +0,0 @@
package sarama
import "encoding/binary"
// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
return PacketDecodingError{"length field invalid"}
}
return nil
}
type varintLengthField struct {
startOffset int
length int64
}
func (l *varintLengthField) decode(pd packetDecoder) error {
var err error
l.length, err = pd.getVarint()
return err
}
func (l *varintLengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *varintLengthField) adjustLength(currOffset int) int {
oldFieldSize := l.reserveLength()
l.length = int64(currOffset - l.startOffset - oldFieldSize)
return l.reserveLength() - oldFieldSize
}
func (l *varintLengthField) reserveLength() int {
var tmp [binary.MaxVarintLen64]byte
return binary.PutVarint(tmp[:], l.length)
}
func (l *varintLengthField) run(curOffset int, buf []byte) error {
binary.PutVarint(buf[l.startOffset:], l.length)
return nil
}
func (l *varintLengthField) check(curOffset int, buf []byte) error {
if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
return PacketDecodingError{"length field invalid"}
}
return nil
}
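
The push pattern above is easiest to see on a concrete buffer: saveOffset reserves a 4-byte window, and run back-fills it once the final offset is known. A self-contained sketch (not part of the diff) using only the lengthField defined above:

// Editor's sketch, not part of the diff.
func lengthFieldDemo() error {
	payload := []byte("abc")
	buf := make([]byte, 4+len(payload))
	lf := &lengthField{}
	lf.saveOffset(0)       // the length word occupies buf[0:4]
	copy(buf[4:], payload) // payload written after the reserved window
	if err := lf.run(len(buf), buf); err != nil { // back-fills uint32(3) at buf[0:4]
		return err
	}
	return lf.check(len(buf), buf) // decode-side verification of the same math
}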

Some files were not shown because too many files have changed in this diff.