Dep ensure -update (#1912)

* dep ensure -update
* Add new files

Signed-off-by: Miek Gieben <miek@miek.nl>

parent 6fe27d99be, commit 9d555ab8d2
1505 changed files with 179032 additions and 208137 deletions

349  Gopkg.lock  (generated)
@@ -3,19 +3,15 @@
 [[projects]]
   name = "github.com/DataDog/dd-trace-go"
-  packages = [
-    "opentracing",
-    "tracer",
-    "tracer/ext"
-  ]
+  packages = ["opentracing","tracer","tracer/ext"]
   revision = "27617015d45e6cd550b9a7ac7715c37cc2f7d020"
   version = "v0.6.1"

 [[projects]]
   name = "github.com/Shopify/sarama"
   packages = ["."]
-  revision = "f7be6aa2bc7b2e38edf816b08b582782194a1c02"
-  version = "v1.16.0"
+  revision = "35324cf48e33d8260e1c7c18854465a904ade249"
+  version = "v1.17.0"

 [[projects]]
   name = "github.com/apache/thrift"
@@ -24,61 +20,15 @@
 [[projects]]
   name = "github.com/aws/aws-sdk-go"
-  packages = [
-    "aws",
-    "aws/awserr",
-    "aws/awsutil",
-    "aws/client",
-    "aws/client/metadata",
-    "aws/corehandlers",
-    "aws/credentials",
-    "aws/credentials/ec2rolecreds",
-    "aws/credentials/endpointcreds",
-    "aws/credentials/stscreds",
-    "aws/defaults",
-    "aws/ec2metadata",
-    "aws/endpoints",
-    "aws/request",
-    "aws/session",
-    "aws/signer/v4",
-    "internal/sdkio",
-    "internal/sdkrand",
-    "internal/shareddefaults",
-    "private/protocol",
-    "private/protocol/query",
-    "private/protocol/query/queryutil",
-    "private/protocol/rest",
-    "private/protocol/restxml",
-    "private/protocol/xml/xmlutil",
-    "service/route53",
-    "service/route53/route53iface",
-    "service/sts"
-  ]
-  revision = "ee7b4b1162937cba700de23bd90acb742982e626"
-  version = "v1.13.50"
+  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/sdkio","internal/sdkrand","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/route53","service/route53/route53iface","service/sts"]
+  revision = "852052a10992d92f68b9a60862a3312292524903"
+  version = "v1.14.17"

 [[projects]]
   name = "github.com/coreos/etcd"
-  packages = [
-    "auth/authpb",
-    "client",
-    "clientv3",
-    "etcdserver/api/v3rpc/rpctypes",
-    "etcdserver/etcdserverpb",
-    "mvcc/mvccpb",
-    "pkg/pathutil",
-    "pkg/srv",
-    "pkg/types",
-    "version"
-  ]
-  revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
-  version = "v3.3.5"
-
-[[projects]]
-  name = "github.com/coreos/go-semver"
-  packages = ["semver"]
-  revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
-  version = "v0.2.0"
+  packages = ["auth/authpb","clientv3","etcdserver/api/v3rpc/rpctypes","etcdserver/etcdserverpb","mvcc/mvccpb","pkg/types"]
+  revision = "33245c6b5b49130ca99280408fadfab01aac0e48"
+  version = "v3.3.8"

 [[projects]]
   name = "github.com/davecgh/go-spew"
@@ -125,8 +75,8 @@
 [[projects]]
   name = "github.com/go-ini/ini"
   packages = ["."]
-  revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
-  version = "v1.36.0"
+  revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
+  version = "v1.37.0"

 [[projects]]
   name = "github.com/go-logfmt/logfmt"
@@ -136,12 +86,7 @@
 [[projects]]
   name = "github.com/gogo/protobuf"
-  packages = [
-    "gogoproto",
-    "proto",
-    "protoc-gen-gogo/descriptor",
-    "sortkeys"
-  ]
+  packages = ["gogoproto","proto","protoc-gen-gogo/descriptor","sortkeys"]
   revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
   version = "v1.0.0"

@@ -153,13 +98,7 @@
 [[projects]]
   name = "github.com/golang/protobuf"
-  packages = [
-    "proto",
-    "ptypes",
-    "ptypes/any",
-    "ptypes/duration",
-    "ptypes/timestamp"
-  ]
+  packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
   revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
   version = "v1.1.0"

@@ -167,7 +106,7 @@
   branch = "master"
   name = "github.com/golang/snappy"
   packages = ["."]
-  revision = "553a641470496b2327abcac10b36396bd98e45c9"
+  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

 [[projects]]
   branch = "master"
@@ -177,13 +116,9 @@
 [[projects]]
   name = "github.com/googleapis/gnostic"
-  packages = [
-    "OpenAPIv2",
-    "compiler",
-    "extensions"
-  ]
-  revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
-  version = "v0.1.0"
+  packages = ["OpenAPIv2","compiler","extensions"]
+  revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
+  version = "v0.2.0"

 [[projects]]
   branch = "master"
@@ -194,10 +129,7 @@
 [[projects]]
   branch = "master"
   name = "github.com/hashicorp/golang-lru"
-  packages = [
-    ".",
-    "simplelru"
-  ]
+  packages = [".","simplelru"]
   revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"

 [[projects]]
@@ -209,8 +141,8 @@
 [[projects]]
   name = "github.com/imdario/mergo"
   packages = ["."]
-  revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c"
-  version = "v0.3.4"
+  revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
+  version = "v0.3.5"

 [[projects]]
   name = "github.com/jmespath/go-jmespath"
@@ -232,8 +164,8 @@
 [[projects]]
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
-  revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
-  version = "v1.0.0"
+  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+  version = "v1.0.1"

 [[projects]]
   name = "github.com/modern-go/concurrent"
@@ -255,38 +187,21 @@
 [[projects]]
   name = "github.com/opentracing/opentracing-go"
-  packages = [
-    ".",
-    "ext",
-    "log"
-  ]
+  packages = [".","ext","log"]
   revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
   version = "v1.0.2"

 [[projects]]
   name = "github.com/openzipkin/zipkin-go-opentracing"
-  packages = [
-    ".",
-    "flag",
-    "thrift/gen-go/scribe",
-    "thrift/gen-go/zipkincore",
-    "types",
-    "wire"
-  ]
+  packages = [".","flag","thrift/gen-go/scribe","thrift/gen-go/zipkincore","types","wire"]
   revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
   version = "v0.3.4"

 [[projects]]
   name = "github.com/pierrec/lz4"
-  packages = ["."]
-  revision = "2fcda4cb7018ce05a25959d2fe08c83e3329f169"
-  version = "v1.1"
-
-[[projects]]
-  name = "github.com/pierrec/xxHash"
-  packages = ["xxHash32"]
-  revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
-  version = "v0.1.1"
+  packages = [".","internal/xxh32"]
+  revision = "1958fd8fff7f115e79725b1288e0b878b3e06b00"
+  version = "v2.0.3"

 [[projects]]
   branch = "master"
@@ -297,12 +212,8 @@
 [[projects]]
   branch = "master"
   name = "github.com/prometheus/common"
-  packages = [
-    "expfmt",
-    "internal/bitbucket.org/ww/goautoneg",
-    "model"
-  ]
-  revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c"
+  packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
+  revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"

 [[projects]]
   branch = "master"
@@ -325,49 +236,23 @@
   branch = "master"
   name = "golang.org/x/crypto"
   packages = ["ssh/terminal"]
-  revision = "1a580b3eff7814fc9b40602fd35256c63b50f491"
+  revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"

 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = [
-    "context",
-    "http/httpguts",
-    "http2",
-    "http2/hpack",
-    "idna",
-    "internal/timeseries",
-    "trace"
-  ]
-  revision = "2491c5de3490fced2f6cff376127c667efeed857"
+  packages = ["context","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
+  revision = "4cb1c02c05b0e749b0365f61ae859a8e0cfceed9"

 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = [
-    "unix",
-    "windows"
-  ]
-  revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b"
+  packages = ["unix","windows"]
+  revision = "7138fd3d9dc8335c567ca206f4333fb75eb05d56"

 [[projects]]
   name = "golang.org/x/text"
-  packages = [
-    "collate",
-    "collate/build",
-    "internal/colltab",
-    "internal/gen",
-    "internal/tag",
-    "internal/triegen",
-    "internal/ucd",
-    "language",
-    "secure/bidirule",
-    "transform",
-    "unicode/bidi",
-    "unicode/cldr",
-    "unicode/norm",
-    "unicode/rangetable"
-  ]
+  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"

@@ -381,39 +266,13 @@
   branch = "master"
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
-  revision = "7bb2a897381c9c5ab2aeb8614f758d7766af68ff"
+  revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0"

 [[projects]]
   name = "google.golang.org/grpc"
-  packages = [
-    ".",
-    "balancer",
-    "balancer/base",
-    "balancer/roundrobin",
-    "channelz",
-    "codes",
-    "connectivity",
-    "credentials",
-    "encoding",
-    "encoding/proto",
-    "grpclb/grpc_lb_v1/messages",
-    "grpclog",
-    "health/grpc_health_v1",
-    "internal",
-    "keepalive",
-    "metadata",
-    "naming",
-    "peer",
-    "resolver",
-    "resolver/dns",
-    "resolver/passthrough",
-    "stats",
-    "status",
-    "tap",
-    "transport"
-  ]
-  revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
-  version = "v1.12.0"
+  packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","encoding/proto","grpclog","health/grpc_health_v1","internal","internal/backoff","internal/channelz","internal/grpcrand","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
+  revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
+  version = "v1.13.0"

 [[projects]]
   name = "gopkg.in/inf.v0"
@@ -429,145 +288,23 @@
 [[projects]]
   name = "k8s.io/api"
-  packages = [
-    "admissionregistration/v1alpha1",
-    "admissionregistration/v1beta1",
-    "apps/v1",
-    "apps/v1beta1",
-    "apps/v1beta2",
-    "authentication/v1",
-    "authentication/v1beta1",
-    "authorization/v1",
-    "authorization/v1beta1",
-    "autoscaling/v1",
-    "autoscaling/v2beta1",
-    "batch/v1",
-    "batch/v1beta1",
-    "batch/v2alpha1",
-    "certificates/v1beta1",
-    "core/v1",
-    "events/v1beta1",
-    "extensions/v1beta1",
-    "networking/v1",
-    "policy/v1beta1",
-    "rbac/v1",
-    "rbac/v1alpha1",
-    "rbac/v1beta1",
-    "scheduling/v1alpha1",
-    "settings/v1alpha1",
-    "storage/v1",
-    "storage/v1alpha1",
-    "storage/v1beta1"
-  ]
+  packages = ["admissionregistration/v1alpha1","admissionregistration/v1beta1","apps/v1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","events/v1beta1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1alpha1","storage/v1beta1"]
   revision = "73d903622b7391f3312dcbac6483fed484e185f8"

 [[projects]]
   name = "k8s.io/apimachinery"
-  packages = [
-    "pkg/api/errors",
-    "pkg/api/meta",
-    "pkg/api/resource",
-    "pkg/apis/meta/internalversion",
-    "pkg/apis/meta/v1",
-    "pkg/apis/meta/v1/unstructured",
-    "pkg/apis/meta/v1beta1",
-    "pkg/conversion",
-    "pkg/conversion/queryparams",
-    "pkg/fields",
-    "pkg/labels",
-    "pkg/runtime",
-    "pkg/runtime/schema",
-    "pkg/runtime/serializer",
-    "pkg/runtime/serializer/json",
-    "pkg/runtime/serializer/protobuf",
-    "pkg/runtime/serializer/recognizer",
-    "pkg/runtime/serializer/streaming",
-    "pkg/runtime/serializer/versioning",
-    "pkg/selection",
-    "pkg/types",
-    "pkg/util/cache",
-    "pkg/util/clock",
-    "pkg/util/diff",
-    "pkg/util/errors",
-    "pkg/util/framer",
-    "pkg/util/intstr",
-    "pkg/util/json",
-    "pkg/util/net",
-    "pkg/util/runtime",
-    "pkg/util/sets",
-    "pkg/util/validation",
-    "pkg/util/validation/field",
-    "pkg/util/wait",
-    "pkg/util/yaml",
-    "pkg/version",
-    "pkg/watch",
-    "third_party/forked/golang/reflect"
-  ]
+  packages = ["pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1beta1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/reflect"]
   revision = "302974c03f7e50f16561ba237db776ab93594ef6"

 [[projects]]
   name = "k8s.io/client-go"
-  packages = [
-    "discovery",
-    "kubernetes",
-    "kubernetes/scheme",
-    "kubernetes/typed/admissionregistration/v1alpha1",
-    "kubernetes/typed/admissionregistration/v1beta1",
-    "kubernetes/typed/apps/v1",
-    "kubernetes/typed/apps/v1beta1",
-    "kubernetes/typed/apps/v1beta2",
-    "kubernetes/typed/authentication/v1",
-    "kubernetes/typed/authentication/v1beta1",
-    "kubernetes/typed/authorization/v1",
-    "kubernetes/typed/authorization/v1beta1",
-    "kubernetes/typed/autoscaling/v1",
-    "kubernetes/typed/autoscaling/v2beta1",
-    "kubernetes/typed/batch/v1",
-    "kubernetes/typed/batch/v1beta1",
-    "kubernetes/typed/batch/v2alpha1",
-    "kubernetes/typed/certificates/v1beta1",
-    "kubernetes/typed/core/v1",
-    "kubernetes/typed/events/v1beta1",
-    "kubernetes/typed/extensions/v1beta1",
-    "kubernetes/typed/networking/v1",
-    "kubernetes/typed/policy/v1beta1",
-    "kubernetes/typed/rbac/v1",
-    "kubernetes/typed/rbac/v1alpha1",
-    "kubernetes/typed/rbac/v1beta1",
-    "kubernetes/typed/scheduling/v1alpha1",
-    "kubernetes/typed/settings/v1alpha1",
-    "kubernetes/typed/storage/v1",
-    "kubernetes/typed/storage/v1alpha1",
-    "kubernetes/typed/storage/v1beta1",
-    "pkg/apis/clientauthentication",
-    "pkg/apis/clientauthentication/v1alpha1",
-    "pkg/version",
-    "plugin/pkg/client/auth/exec",
-    "rest",
-    "rest/watch",
-    "tools/auth",
-    "tools/cache",
-    "tools/clientcmd",
-    "tools/clientcmd/api",
-    "tools/clientcmd/api/latest",
-    "tools/clientcmd/api/v1",
-    "tools/metrics",
-    "tools/pager",
-    "tools/reference",
-    "transport",
-    "util/buffer",
-    "util/cert",
-    "util/flowcontrol",
-    "util/homedir",
-    "util/integer",
-    "util/retry"
-  ]
+  packages = ["discovery","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1beta1","kubernetes/typed/apps/v1","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta2","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/events/v1beta1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1alpha1","kubernetes/typed/storage/v1beta1","pkg/apis/clientauthentication","pkg/apis/clientauthentication/v1alpha1","pkg/version","plugin/pkg/client/auth/exec","rest","rest/watch","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/pager","tools/reference","transport","util/buffer","util/cert","util/flowcontrol","util/homedir","util/integer","util/retry"]
   revision = "23781f4d6632d88e869066eaebb743857aa1ef9b"
   version = "v7.0.0"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "435926fcc83a4f1a93fd257248f2b1256eaa5a212159b07743408b8cafdbffff"
+  inputs-digest = "cb1fb57b8b1053263afdf42b13581b0fe510bdc842616466ea7824563bfcf3ed"
   solver-name = "gps-cdcl"
   solver-version = 1

3  vendor/github.com/Shopify/sarama/.travis.yml  (generated, vendored)

@@ -2,6 +2,7 @@ language: go
 go:
 - 1.8.x
 - 1.9.x
+- 1.10.x

 env:
   global:
@@ -11,9 +12,9 @@ env:
 - KAFKA_HOSTNAME=localhost
 - DEBUG=true
 matrix:
-- KAFKA_VERSION=0.10.2.1
 - KAFKA_VERSION=0.11.0.2
 - KAFKA_VERSION=1.0.0
+- KAFKA_VERSION=1.1.0

 before_install:
 - export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}

38  vendor/github.com/Shopify/sarama/CHANGELOG.md  (generated, vendored)

@@ -1,5 +1,43 @@
 # Changelog
+
+#### Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/Shopify/sarama/pull/1047),
+   [#1069](https://github.com/Shopify/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/Shopify/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/Shopify/sarama/pull/1065),
+   [#1096](https://github.com/Shopify/sarama/pull/1096),
+   [#1027](https://github.com/Shopify/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/Shopify/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/Shopify/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/Shopify/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/Shopify/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/Shopify/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/Shopify/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/Shopify/sarama/pull/1050),
+   [#1051](https://github.com/Shopify/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/Shopify/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/Shopify/sarama/pull/1092)).
+
 #### Version 1.16.0 (2018-02-12)

 New Features:
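The 1.17.0 notes above call out a new `Controller()` accessor on the Client interface. As a minimal sketch of how that accessor might be exercised (not part of this commit; the seed broker address and the fatal error handling are assumptions):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Controller() relies on the v1 Metadata response, so the config must
	// claim at least Kafka 0.10.0.0 (older versions yield ErrControllerNotAvailable).
	cfg.Version = sarama.V0_10_0_0

	// "localhost:9092" is a placeholder seed broker, not taken from this diff.
	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctrl, err := client.Controller() // added in sarama v1.17.0
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster controller is %s", ctrl.Addr())
}
```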

2  vendor/github.com/Shopify/sarama/LICENSE  (generated, vendored)

@@ -1,4 +1,4 @@
-Copyright (c) 2013 Evan Huus
+Copyright (c) 2013 Shopify

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the

5  vendor/github.com/Shopify/sarama/Makefile  (generated, vendored)

@@ -4,7 +4,7 @@ default: fmt vet errcheck test
 test:
 	echo "" > coverage.txt
 	for d in `go list ./... | grep -v vendor`; do \
-	    go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
+	    go test -p 1 -v -timeout 90s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
 	    if [ -f profile.out ]; then \
 	        cat profile.out >> coverage.txt; \
 	        rm profile.out; \
@@ -14,8 +14,9 @@ test:
 vet:
 	go vet ./...

+# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
 errcheck:
-	errcheck github.com/Shopify/sarama/...
+	errcheck -ignorepkg fmt github.com/Shopify/sarama/...

 fmt:
 	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi

2  vendor/github.com/Shopify/sarama/README.md  (generated, vendored)

@@ -21,7 +21,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
 Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
 the two latest stable releases of Kafka and Go, and we provide a two month
 grace period for older releases. This means we currently officially support
-Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are
+Go 1.8 through 1.10, and Kafka 0.11 through 1.1, although older releases are
 still likely to work.

 Sarama follows semantic versioning and provides API stability via the gopkg.in service.

7  vendor/github.com/Shopify/sarama/alter_configs_request_test.go  (generated, vendored)

@@ -31,11 +31,7 @@ var (
 		'1', '0', '0', '0',
 		2, // a topic
 		0, 3, 'b', 'a', 'r', // topic name: bar
-		0, 0, 0, 2, // 2 configs
-		0, 10, // 10 chars
-		's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
-		0, 4,
-		'1', '0', '0', '0',
+		0, 0, 0, 1, // 1 config
 		0, 12, // 12 chars
 		'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
 		0, 4,
@@ -80,7 +76,6 @@ func TestAlterConfigsRequest(t *testing.T) {
 			Type: TopicResource,
 			Name: "bar",
 			ConfigEntries: map[string]*string{
-				"segment.ms":   &configValue,
 				"retention.ms": &configValue,
 			},
 		},

5  vendor/github.com/Shopify/sarama/async_producer_test.go  (generated, vendored)

@@ -131,9 +131,12 @@ func TestAsyncProducer(t *testing.T) {
 			if msg.Metadata.(int) != i {
 				t.Error("Message metadata did not match")
 			}
+		case <-time.After(time.Second):
+			t.Errorf("Timeout waiting for msg #%d", i)
+			goto done
 		}
 	}
-
+done:
 	closeProducer(t, producer)
 	leader.Close()
 	seedBroker.Close()

64  vendor/github.com/Shopify/sarama/broker.go  (generated, vendored)

@@ -18,6 +18,7 @@ import (
 type Broker struct {
 	id   int32
 	addr string
+	rack *string

 	conf          *Config
 	correlationID int32
@@ -230,6 +231,18 @@ func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*Consume
 	return response, nil
 }

+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
 func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
 	response := new(OffsetResponse)

@@ -373,6 +386,17 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse,
 	return response, nil
 }

+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
 func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
 	response := new(CreateTopicsResponse)

@@ -395,6 +419,17 @@ func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsRespon
 	return response, nil
 }

+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
 func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
 	response := new(DescribeAclsResponse)

@@ -504,6 +539,17 @@ func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsRespon

 	return response, nil
 }

+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
 func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
@@ -569,7 +615,7 @@ func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
 	}
 }

-func (b *Broker) decode(pd packetDecoder) (err error) {
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
 	b.id, err = pd.getInt32()
 	if err != nil {
 		return err
@@ -585,6 +631,13 @@ func (b *Broker) decode(pd packetDecoder) (err error) {
 		return err
 	}

+	if version >= 1 {
+		b.rack, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
 	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
 	if _, _, err := net.SplitHostPort(b.addr); err != nil {
 		return err
@@ -593,7 +646,7 @@ func (b *Broker) decode(pd packetDecoder) (err error) {
 	return nil
 }

-func (b *Broker) encode(pe packetEncoder) (err error) {
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {

 	host, portstr, err := net.SplitHostPort(b.addr)
 	if err != nil {
@@ -613,6 +666,13 @@ func (b *Broker) encode(pe packetEncoder) (err error) {

 	pe.putInt32(int32(port))

+	if version >= 1 {
+		err = pe.putNullableString(b.rack)
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
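broker.go above grows several per-request helpers (FindCoordinator, CreatePartitions, DeleteRecords, DeleteGroups) that all follow the same sendAndReceive pattern. A hedged sketch of driving the new FindCoordinator call directly; the broker is assumed to be already connected via Open(), and "my-group" is a placeholder id, not something from this diff:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// findGroupCoordinator assumes `broker` has already been Open()ed against a
// reachable cluster; the group id is illustrative only.
func findGroupCoordinator(broker *sarama.Broker) {
	req := new(sarama.FindCoordinatorRequest)
	req.CoordinatorKey = "my-group"
	req.CoordinatorType = sarama.CoordinatorGroup

	resp, err := broker.FindCoordinator(req)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator for my-group: %s", resp.Coordinator.Addr())
}
```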

62  vendor/github.com/Shopify/sarama/broker_test.go  (generated, vendored)

@@ -71,7 +71,7 @@ func TestSimpleBrokerCommunication(t *testing.T) {
 	// Set the broker id in order to validate local broker metrics
 	broker.id = 0
 	conf := NewConfig()
-	conf.Version = V0_10_0_0
+	conf.Version = tt.version
 	err := broker.Open(conf)
 	if err != nil {
 		t.Fatal(err)
@@ -97,11 +97,13 @@ func TestSimpleBrokerCommunication(t *testing.T) {

 // We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
 var brokerTestTable = []struct {
+	version  KafkaVersion
 	name     string
 	response []byte
 	runner   func(*testing.T, *Broker)
 }{
-	{"MetadataRequest",
+	{V0_10_0_0,
+		"MetadataRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := MetadataRequest{}
@@ -114,7 +116,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"ConsumerMetadataRequest",
+	{V0_10_0_0,
+		"ConsumerMetadataRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := ConsumerMetadataRequest{}
@@ -127,7 +130,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"ProduceRequest (NoResponse)",
+	{V0_10_0_0,
+		"ProduceRequest (NoResponse)",
 		[]byte{},
 		func(t *testing.T, broker *Broker) {
 			request := ProduceRequest{}
@@ -141,7 +145,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"ProduceRequest (WaitForLocal)",
+	{V0_10_0_0,
+		"ProduceRequest (WaitForLocal)",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := ProduceRequest{}
@@ -155,7 +160,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"FetchRequest",
+	{V0_10_0_0,
+		"FetchRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := FetchRequest{}
@@ -168,7 +174,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"OffsetFetchRequest",
+	{V0_10_0_0,
+		"OffsetFetchRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := OffsetFetchRequest{}
@@ -181,7 +188,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"OffsetCommitRequest",
+	{V0_10_0_0,
+		"OffsetCommitRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := OffsetCommitRequest{}
@@ -194,7 +202,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"OffsetRequest",
+	{V0_10_0_0,
+		"OffsetRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := OffsetRequest{}
@@ -207,7 +216,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"JoinGroupRequest",
+	{V0_10_0_0,
+		"JoinGroupRequest",
 		[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := JoinGroupRequest{}
@@ -220,7 +230,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"SyncGroupRequest",
+	{V0_10_0_0,
+		"SyncGroupRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := SyncGroupRequest{}
@@ -233,7 +244,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"LeaveGroupRequest",
+	{V0_10_0_0,
+		"LeaveGroupRequest",
 		[]byte{0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := LeaveGroupRequest{}
@@ -246,7 +258,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"HeartbeatRequest",
+	{V0_10_0_0,
+		"HeartbeatRequest",
 		[]byte{0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := HeartbeatRequest{}
@@ -259,7 +272,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"ListGroupsRequest",
+	{V0_10_0_0,
+		"ListGroupsRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := ListGroupsRequest{}
@@ -272,7 +286,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"DescribeGroupsRequest",
+	{V0_10_0_0,
+		"DescribeGroupsRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := DescribeGroupsRequest{}
@@ -285,7 +300,8 @@ var brokerTestTable = []struct {
 		}
 	}},

-	{"ApiVersionsRequest",
+	{V0_10_0_0,
+		"ApiVersionsRequest",
 		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
 		func(t *testing.T, broker *Broker) {
 			request := ApiVersionsRequest{}
@@ -297,6 +313,20 @@ var brokerTestTable = []struct {
 			t.Error("ApiVersions request got no response!")
 		}
 	}},
+
+	{V1_1_0_0,
+		"DeleteGroupsRequest",
+		[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+		func(t *testing.T, broker *Broker) {
+			request := DeleteGroupsRequest{}
+			response, err := broker.DeleteGroups(&request)
+			if err != nil {
+				t.Error(err)
+			}
+			if response == nil {
+				t.Error("DeleteGroups request got no response!")
+			}
+		}},
 }

 func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {

96  vendor/github.com/Shopify/sarama/client.go  (generated, vendored)

@@ -17,6 +17,9 @@ type Client interface {
 	// altered after it has been created.
 	Config() *Config

+	// Controller returns the cluster controller broker.
+	Controller() (*Broker, error)
+
 	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
 	Brokers() []*Broker

@@ -97,6 +100,7 @@ type client struct {
 	seedBrokers []*Broker
 	deadSeeds   []*Broker

+	controllerID   int32                                   // cluster controller broker id
 	brokers        map[int32]*Broker                       // maps broker ids to brokers
 	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
 	coordinators   map[string]int32                        // Maps consumer group names to coordinating broker IDs
@@ -379,6 +383,27 @@ func (client *client) GetOffset(topic string, partitionID int32, time int64) (in
 	return offset, err
 }

+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
 func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
 	if client.Closed() {
 		return nil, ErrClosedClient
@@ -607,20 +632,7 @@ func (client *client) backgroundMetadataUpdater() {
 	for {
 		select {
 		case <-ticker.C:
-			topics := []string{}
-			if !client.conf.Metadata.Full {
-				if specificTopics, err := client.Topics(); err != nil {
-					Logger.Println("Client background metadata topic load:", err)
-					break
-				} else if len(specificTopics) == 0 {
-					Logger.Println("Client background metadata update: no specific topics to update")
-					break
-				} else {
-					topics = specificTopics
-				}
-			}
-
-			if err := client.RefreshMetadata(topics...); err != nil {
+			if err := client.refreshMetadata(); err != nil {
 				Logger.Println("Client background metadata update:", err)
 			}
 		case <-client.closer:
@@ -629,6 +641,26 @@ func (client *client) backgroundMetadataUpdater() {
 	}
 }

+func (client *client) refreshMetadata() error {
+	topics := []string{}
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.Topics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
 	retry := func(err error) error {
 		if attemptsRemaining > 0 {
@@ -645,12 +677,18 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
 		} else {
 			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
 		}
-		response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+		req := &MetadataRequest{Topics: topics}
+		if client.conf.Version.IsAtLeast(V0_10_0_0) {
+			req.Version = 1
+		}
+		response, err := broker.GetMetadata(req)

 		switch err.(type) {
 		case nil:
+			allKnownMetaData := len(topics) == 0
 			// valid response, use it
-			shouldRetry, err := client.updateMetadata(response)
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
 			if shouldRetry {
 				Logger.Println("client/metadata found some partitions to be leaderless")
 				return retry(err) // note: err can be nil
@@ -674,7 +712,7 @@ func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int)
 }

 // if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
-func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
 	client.lock.Lock()
 	defer client.lock.Unlock()

@@ -686,6 +724,12 @@ func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err er
 		client.registerBroker(broker)
 	}

+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
 	for _, topic := range data.Topics {
 		delete(client.metadata, topic.Name)
 		delete(client.cachedPartitionsResults, topic.Name)
@@ -735,8 +779,15 @@ func (client *client) cachedCoordinator(consumerGroup string) *Broker {
 	return nil
 }

-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
-	retry := func(err error) (*ConsumerMetadataResponse, error) {
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
 		if attemptsRemaining > 0 {
 			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
 			time.Sleep(client.conf.Metadata.Retry.Backoff)
@@ -748,10 +799,11 @@ func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemainin
 	for broker := client.any(); broker != nil; broker = client.any() {
 		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())

-		request := new(ConsumerMetadataRequest)
-		request.ConsumerGroup = consumerGroup
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = consumerGroup
+		request.CoordinatorType = CoordinatorGroup

-		response, err := broker.GetConsumerMetadata(request)
+		response, err := broker.FindCoordinator(request)

 		if err != nil {
 			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)

41  vendor/github.com/Shopify/sarama/client_test.go  (generated, vendored)

@@ -444,6 +444,47 @@ func TestClientResurrectDeadSeeds(t *testing.T) {
 	safeClose(t, c)
 }

+func TestClientController(t *testing.T) {
+	seedBroker := NewMockBroker(t, 1)
+	defer seedBroker.Close()
+	controllerBroker := NewMockBroker(t, 2)
+	defer controllerBroker.Close()
+
+	seedBroker.SetHandlerByMap(map[string]MockResponse{
+		"MetadataRequest": NewMockMetadataResponse(t).
+			SetController(controllerBroker.BrokerID()).
+			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
+			SetBroker(controllerBroker.Addr(), controllerBroker.BrokerID()),
+	})
+
+	cfg := NewConfig()
+
+	// test kafka version greater than 0.10.0.0
+	cfg.Version = V0_10_0_0
+	client1, err := NewClient([]string{seedBroker.Addr()}, cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer safeClose(t, client1)
+	broker, err := client1.Controller()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if broker.Addr() != controllerBroker.Addr() {
+		t.Errorf("Expected controller to have address %s, found %s", controllerBroker.Addr(), broker.Addr())
+	}
+
+	// test kafka version earlier than 0.10.0.0
+	cfg.Version = V0_9_0_1
+	client2, err := NewClient([]string{seedBroker.Addr()}, cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer safeClose(t, client2)
+	if _, err = client2.Controller(); err != ErrControllerNotAvailable {
+		t.Errorf("Expected Controller() to return %s, found %s", ErrControllerNotAvailable, err)
+	}
+}
+
 func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) {
 	seedBroker := NewMockBroker(t, 1)
 	staleCoordinator := NewMockBroker(t, 2)

18  vendor/github.com/Shopify/sarama/config.go  (generated, vendored)

@@ -1,7 +1,10 @@
 package sarama

 import (
+	"compress/gzip"
 	"crypto/tls"
+	"fmt"
+	"io/ioutil"
 	"regexp"
 	"time"

@@ -99,6 +102,10 @@ type Config struct {
 		// The type of compression to use on messages (defaults to no compression).
 		// Similar to `compression.codec` setting of the JVM producer.
 		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to default compression
+		// level for the codec.
+		CompressionLevel int
 		// Generates partitioners for choosing the partition to send messages to
 		// (defaults to hashing the message key). Similar to the `partitioner.class`
 		// setting for the JVM producer.
@@ -290,6 +297,7 @@ func NewConfig() *Config {
 	c.Producer.Retry.Max = 3
 	c.Producer.Retry.Backoff = 100 * time.Millisecond
 	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault

 	c.Consumer.Fetch.Min = 1
 	c.Consumer.Fetch.Default = 1024 * 1024
@@ -302,7 +310,7 @@ func NewConfig() *Config {

 	c.ClientID = defaultClientID
 	c.ChannelBufferSize = 256
-	c.Version = minVersion
+	c.Version = MinVersion
 	c.MetricRegistry = metrics.NewRegistry()

 	return c
@@ -409,6 +417,14 @@ func (c *Config) Validate() error {
 		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
 	}

+	if c.Producer.Compression == CompressionGZIP {
+		if c.Producer.CompressionLevel != CompressionLevelDefault {
+			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
+				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+			}
+		}
+	}
+
 	// validate the Consumer values
 	switch {
 	case c.Consumer.Fetch.Min <= 0:
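The config.go hunk above is the validation half of the new gzip compression levels: Validate() now probes the requested level with compress/gzip before any producer is built. A small sketch of opting into an explicit level (the choice of gzip.BestCompression is an assumption for illustration, not something this commit prescribes):

```go
package main

import (
	"compress/gzip"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Compression = sarama.CompressionGZIP
	// New in v1.17.0: CompressionLevel defaults to CompressionLevelDefault;
	// any other value is checked against gzip.NewWriterLevel at Validate() time.
	cfg.Producer.CompressionLevel = gzip.BestCompression

	if err := cfg.Validate(); err != nil {
		log.Fatal(err) // e.g. "gzip compression does not work with level ..."
	}
}
```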

85  vendor/github.com/Shopify/sarama/consumer.go  (generated, vendored)

@@ -310,6 +310,7 @@ type partitionConsumer struct {

 	trigger, dying chan none
 	responseResult error
+	closeOnce      sync.Once

 	fetchSize int32
 	offset    int64
@@ -412,7 +413,9 @@ func (child *partitionConsumer) AsyncClose() {
 	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
 	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
 	// also just close itself)
-	close(child.dying)
+	child.closeOnce.Do(func() {
+		close(child.dying)
+	})
 }

 func (child *partitionConsumer) Close() error {
@@ -461,7 +464,6 @@ feederLoop:
 				child.messages <- msg
 			}
 			child.broker.input <- child
-			expiryTicker.Stop()
 			continue feederLoop
 		} else {
 			// current message has not been sent, return to select
@@ -482,9 +484,6 @@

 func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
 	var messages []*ConsumerMessage
-	var incomplete bool
-	prelude := true
-
 	for _, msgBlock := range msgSet.Messages {
 		for _, msg := range msgBlock.Messages() {
 			offset := msg.Offset
@@ -492,29 +491,22 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe
 				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
 				offset += baseOffset
 			}
-			if prelude && offset < child.offset {
+			if offset < child.offset {
 				continue
 			}
-			prelude = false

-			if offset >= child.offset {
-				messages = append(messages, &ConsumerMessage{
-					Topic:          child.topic,
-					Partition:      child.partition,
-					Key:            msg.Msg.Key,
-					Value:          msg.Msg.Value,
-					Offset:         offset,
-					Timestamp:      msg.Msg.Timestamp,
-					BlockTimestamp: msgBlock.Msg.Timestamp,
-				})
-				child.offset = offset + 1
-			} else {
-				incomplete = true
-			}
+			messages = append(messages, &ConsumerMessage{
+				Topic:          child.topic,
+				Partition:      child.partition,
+				Key:            msg.Msg.Key,
+				Value:          msg.Msg.Value,
+				Offset:         offset,
+				Timestamp:      msg.Msg.Timestamp,
+				BlockTimestamp: msgBlock.Msg.Timestamp,
+			})
+			child.offset = offset + 1
 		}
 	}

-	if incomplete || len(messages) == 0 {
+	if len(messages) == 0 {
 		return nil, ErrIncompleteResponse
 	}
 	return messages, nil
@@ -522,42 +514,25 @@ func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMe

 func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
 	var messages []*ConsumerMessage
-	var incomplete bool
-	prelude := true
-	originalOffset := child.offset

 	for _, rec := range batch.Records {
 		offset := batch.FirstOffset + rec.OffsetDelta
-		if prelude && offset < child.offset {
+		if offset < child.offset {
 			continue
 		}
-		prelude = false
-
-		if offset >= child.offset {
-			messages = append(messages, &ConsumerMessage{
-				Topic:     child.topic,
-				Partition: child.partition,
-				Key:       rec.Key,
-				Value:     rec.Value,
-				Offset:    offset,
-				Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
-				Headers:   rec.Headers,
-			})
-			child.offset = offset + 1
-		} else {
-			incomplete = true
-		}
+		messages = append(messages, &ConsumerMessage{
+			Topic:     child.topic,
+			Partition: child.partition,
+			Key:       rec.Key,
+			Value:     rec.Value,
+			Offset:    offset,
+			Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
 	}

-	if incomplete {
+	if len(messages) == 0 {
 		return nil, ErrIncompleteResponse
 	}

-	child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1
-	if child.offset <= originalOffset {
-		return nil, ErrConsumerOffsetNotAdvanced
-	}
-
 	return messages, nil
 }
@@ -610,14 +585,14 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu

 		switch records.recordsType {
 		case legacyRecords:
-			messageSetMessages, err := child.parseMessages(records.msgSet)
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
 			if err != nil {
 				return nil, err
 			}

 			messages = append(messages, messageSetMessages...)
 		case defaultRecords:
-			recordBatchMessages, err := child.parseRecords(records.recordBatch)
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
 			if err != nil {
 				return nil, err
 			}
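The parseMessages/parseRecords rewrite above drops the prelude/incomplete bookkeeping: records below the requested offset are now simply skipped, and only a fully empty result is reported as ErrIncompleteResponse. A standalone illustration of that filtering with hypothetical types (not sarama's own):

```go
package main

import "fmt"

type record struct{ Offset int64 }

// keepFrom mirrors the simplified skip logic: anything before `from` is
// dropped outright, with no prelude flag and no incomplete marker.
func keepFrom(batch []record, from int64) []record {
	var out []record
	for _, r := range batch {
		if r.Offset < from {
			continue
		}
		out = append(out, r)
	}
	return out
}

func main() {
	batch := []record{{Offset: 5}, {Offset: 6}, {Offset: 7}}
	fmt.Println(keepFrom(batch, 6)) // keeps offsets 6 and 7
}
```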

13  vendor/github.com/Shopify/sarama/consumer_metadata_request.go  (generated, vendored)

@@ -5,12 +5,19 @@ type ConsumerMetadataRequest struct {
 }

 func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
-	return pe.putString(r.ConsumerGroup)
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	return tmp.encode(pe)
 }

 func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.ConsumerGroup, err = pd.getString()
-	return err
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
 }

 func (r *ConsumerMetadataRequest) key() int16 {

10  vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go  (generated, vendored)

@@ -1,6 +1,8 @@
 package sarama

-import "testing"
+import (
+	"testing"
+)

 var (
 	consumerMetadataRequestEmpty = []byte{
@@ -12,8 +14,10 @@ var (

 func TestConsumerMetadataRequest(t *testing.T) {
 	request := new(ConsumerMetadataRequest)
-	testRequest(t, "empty string", request, consumerMetadataRequestEmpty)
+	testEncodable(t, "empty string", request, consumerMetadataRequestEmpty)
+	testVersionDecodable(t, "empty string", request, consumerMetadataRequestEmpty, 0)

 	request.ConsumerGroup = "foobar"
-	testRequest(t, "with string", request, consumerMetadataRequestString)
+	testEncodable(t, "with string", request, consumerMetadataRequestString)
+	testVersionDecodable(t, "with string", request, consumerMetadataRequestString, 0)
 }

48  vendor/github.com/Shopify/sarama/consumer_metadata_response.go  (generated, vendored)

@@ -14,20 +14,18 @@ type ConsumerMetadataResponse struct {
 }

 func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(tmp)
+	tmp := new(FindCoordinatorResponse)

-	coordinator := new(Broker)
-	if err := coordinator.decode(pd); err != nil {
+	if err := tmp.decode(pd, version); err != nil {
 		return err
 	}
-	if coordinator.addr == ":0" {
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
 		return nil
 	}
-	r.Coordinator = coordinator

 	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
 	// backwards compatibility
@@ -47,28 +45,22 @@ func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err
 }

 func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	if r.Coordinator != nil {
-		host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
-		if err != nil {
-			return err
-		}
-		port, err := strconv.ParseInt(portstr, 10, 32)
-		if err != nil {
-			return err
-		}
-		pe.putInt32(r.Coordinator.ID())
-		if err := pe.putString(host); err != nil {
-			return err
-		}
-		pe.putInt32(int32(port))
-		return nil
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
 	}
-	pe.putInt32(r.CoordinatorID)
-	if err := pe.putString(r.CoordinatorHost); err != nil {
+
+	tmp := &FindCoordinatorResponse{
+		Version:     0,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
 		return err
 	}
-	pe.putInt32(r.CoordinatorPort)

 	return nil
 }

13  vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go  (generated, vendored)

@@ -17,8 +17,17 @@ var (
 )

 func TestConsumerMetadataResponseError(t *testing.T) {
-	response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
-	testResponse(t, "error", &response, consumerMetadataResponseError)
+	response := &ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress}
+	testEncodable(t, "", response, consumerMetadataResponseError)
+
+	decodedResp := &ConsumerMetadataResponse{}
+	if err := versionedDecode(consumerMetadataResponseError, decodedResp, 0); err != nil {
+		t.Error("could not decode: ", err)
+	}
+
+	if decodedResp.Err != ErrOffsetsLoadInProgress {
+		t.Errorf("got %s, want %s", decodedResp.Err, ErrOffsetsLoadInProgress)
+	}
 }

 func TestConsumerMetadataResponseSuccess(t *testing.T) {
30 vendor/github.com/Shopify/sarama/delete_groups_request.go generated vendored Normal file
@@ -0,0 +1,30 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
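For context when reviewing: a minimal, hypothetical sketch (not part of this diff) of how the new DeleteGroupsRequest is meant to be built. It assumes only what the file above defines (the Groups field and the AddGroup helper) plus the vendored github.com/Shopify/sarama import path.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// AddGroup simply appends to the Groups slice; API key 42 and
	// version 0 identify DeleteGroups, which requires brokers at
	// V1_1_0_0 or newer.
	req := new(sarama.DeleteGroupsRequest)
	req.AddGroup("foo")
	req.AddGroup("bar")
	fmt.Println(req.Groups) // [foo bar]
}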
34 vendor/github.com/Shopify/sarama/delete_groups_request_test.go generated vendored Normal file
@@ -0,0 +1,34 @@
+package sarama
+
+import "testing"
+
+var (
+	emptyDeleteGroupsRequest = []byte{0, 0, 0, 0}
+
+	singleDeleteGroupsRequest = []byte{
+		0, 0, 0, 1, // 1 group
+		0, 3, 'f', 'o', 'o', // group name: foo
+	}
+
+	doubleDeleteGroupsRequest = []byte{
+		0, 0, 0, 2, // 2 groups
+		0, 3, 'f', 'o', 'o', // group name: foo
+		0, 3, 'b', 'a', 'r', // group name: bar
+	}
+)
+
+func TestDeleteGroupsRequest(t *testing.T) {
+	var request *DeleteGroupsRequest
+
+	request = new(DeleteGroupsRequest)
+	testRequest(t, "no groups", request, emptyDeleteGroupsRequest)
+
+	request = new(DeleteGroupsRequest)
+	request.AddGroup("foo")
+	testRequest(t, "one group", request, singleDeleteGroupsRequest)
+
+	request = new(DeleteGroupsRequest)
+	request.AddGroup("foo")
+	request.AddGroup("bar")
+	testRequest(t, "two groups", request, doubleDeleteGroupsRequest)
+}
70 vendor/github.com/Shopify/sarama/delete_groups_response.go generated vendored Normal file
@@ -0,0 +1,70 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteGroupsResponse struct {
+	ThrottleTime    time.Duration
+	GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+		return err
+	}
+	for groupID, errorCode := range r.GroupErrorCodes {
+		if err := pe.putString(groupID); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupErrorCodes = make(map[string]KError, n)
+	for i := 0; i < n; i++ {
+		groupID, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		r.GroupErrorCodes[groupID] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
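A short, hypothetical sketch of how a caller might read the per-group error map the decoder above fills in; it assumes only the exported GroupErrorCodes field and sarama's KError/ErrNoError values.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// deletedOK reports whether the named group was removed: the response maps
// each requested group to a KError, and ErrNoError marks success.
func deletedOK(resp *sarama.DeleteGroupsResponse, group string) bool {
	err, ok := resp.GroupErrorCodes[group]
	return ok && err == sarama.ErrNoError
}

func main() {
	resp := &sarama.DeleteGroupsResponse{
		GroupErrorCodes: map[string]sarama.KError{"foo": sarama.ErrNoError},
	}
	fmt.Println(deletedOK(resp, "foo")) // true
}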
57 vendor/github.com/Shopify/sarama/delete_groups_response_test.go generated vendored Normal file
@@ -0,0 +1,57 @@
+package sarama
+
+import (
+	"testing"
+)
+
+var (
+	emptyDeleteGroupsResponse = []byte{
+		0, 0, 0, 0, // does not violate any quota
+		0, 0, 0, 0, // no groups
+	}
+
+	errorDeleteGroupsResponse = []byte{
+		0, 0, 0, 0, // does not violate any quota
+		0, 0, 0, 1, // 1 group
+		0, 3, 'f', 'o', 'o', // group name
+		0, 31, // error ErrClusterAuthorizationFailed
+	}
+
+	noErrorDeleteGroupsResponse = []byte{
+		0, 0, 0, 0, // does not violate any quota
+		0, 0, 0, 1, // 1 group
+		0, 3, 'f', 'o', 'o', // group name
+		0, 0, // no error
+	}
+)
+
+func TestDeleteGroupsResponse(t *testing.T) {
+	var response *DeleteGroupsResponse
+
+	response = new(DeleteGroupsResponse)
+	testVersionDecodable(t, "empty", response, emptyDeleteGroupsResponse, 0)
+	if response.ThrottleTime != 0 {
+		t.Error("Expected no violation")
+	}
+	if len(response.GroupErrorCodes) != 0 {
+		t.Error("Expected no groups")
+	}
+
+	response = new(DeleteGroupsResponse)
+	testVersionDecodable(t, "error", response, errorDeleteGroupsResponse, 0)
+	if response.ThrottleTime != 0 {
+		t.Error("Expected no violation")
+	}
+	if response.GroupErrorCodes["foo"] != ErrClusterAuthorizationFailed {
+		t.Error("Expected error ErrClusterAuthorizationFailed, found:", response.GroupErrorCodes["foo"])
+	}
+
+	response = new(DeleteGroupsResponse)
+	testVersionDecodable(t, "no error", response, noErrorDeleteGroupsResponse, 0)
+	if response.ThrottleTime != 0 {
+		t.Error("Expected no violation")
+	}
+	if response.GroupErrorCodes["foo"] != ErrNoError {
+		t.Error("Expected error ErrNoError, found:", response.GroupErrorCodes["foo"])
+	}
+}
126 vendor/github.com/Shopify/sarama/delete_records_request.go generated vendored Normal file
@@ -0,0 +1,126 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+	Topics  map[string]*DeleteRecordsRequestTopic
+	Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsRequestTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsRequestTopic struct {
+	PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.PartitionOffsets))
+	for partition := range t.PartitionOffsets {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		pe.putInt64(t.PartitionOffsets[partition])
+	}
+	return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.PartitionOffsets = make(map[int32]int64, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			offset, err := pd.getInt64()
+			if err != nil {
+				return err
+			}
+			t.PartitionOffsets[partition] = offset
+		}
+	}
+
+	return nil
+}
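A minimal sketch of constructing the new request, mirroring the wire format documented above (topic name, then partition id to offset, then a timeout). The types and fields are exactly those the file defines; the broker hand-off is left out since it is not part of this diff.

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Ask the cluster to discard records on partition 0 of "logs"
	// below offset 1000.
	req := &sarama.DeleteRecordsRequest{
		Topics: map[string]*sarama.DeleteRecordsRequestTopic{
			"logs": {PartitionOffsets: map[int32]int64{0: 1000}},
		},
		Timeout: 100 * time.Millisecond,
	}
	_ = req // would be sent via a broker call that accepts this request type
}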
36 vendor/github.com/Shopify/sarama/delete_records_request_test.go generated vendored Normal file
@@ -0,0 +1,36 @@
+package sarama
+
+import (
+	"testing"
+	"time"
+)
+
+var deleteRecordsRequest = []byte{
+	0, 0, 0, 2,
+	0, 5, 'o', 't', 'h', 'e', 'r',
+	0, 0, 0, 0,
+	0, 5, 't', 'o', 'p', 'i', 'c',
+	0, 0, 0, 2,
+	0, 0, 0, 19,
+	0, 0, 0, 0, 0, 0, 0, 200,
+	0, 0, 0, 20,
+	0, 0, 0, 0, 0, 0, 0, 190,
+	0, 0, 0, 100,
+}
+
+func TestDeleteRecordsRequest(t *testing.T) {
+	req := &DeleteRecordsRequest{
+		Topics: map[string]*DeleteRecordsRequestTopic{
+			"topic": {
+				PartitionOffsets: map[int32]int64{
+					19: 200,
+					20: 190,
+				},
+			},
+			"other": {},
+		},
+		Timeout: 100 * time.Millisecond,
+	}
+
+	testRequest(t, "", req, deleteRecordsRequest)
+}
158 vendor/github.com/Shopify/sarama/delete_records_response.go generated vendored Normal file
@@ -0,0 +1,158 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
+	d.Version = version
+
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponseTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsResponseTopic struct {
+	Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.Partitions))
+	for partition := range t.Partitions {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		if err := t.Partitions[partition].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponsePartition)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			t.Partitions[partition] = details
+		}
+	}
+
+	return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+	LowWatermark int64
+	Err          KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt64(t.LowWatermark)
+	pe.putInt16(int16(t.Err))
+	return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+	lowWatermark, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	t.LowWatermark = lowWatermark
+
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	return nil
+}
39 vendor/github.com/Shopify/sarama/delete_records_response_test.go generated vendored Normal file
@@ -0,0 +1,39 @@
+package sarama
+
+import (
+	"testing"
+	"time"
+)
+
+var deleteRecordsResponse = []byte{
+	0, 0, 0, 100,
+	0, 0, 0, 2,
+	0, 5, 'o', 't', 'h', 'e', 'r',
+	0, 0, 0, 0,
+	0, 5, 't', 'o', 'p', 'i', 'c',
+	0, 0, 0, 2,
+	0, 0, 0, 19,
+	0, 0, 0, 0, 0, 0, 0, 200,
+	0, 0,
+	0, 0, 0, 20,
+	255, 255, 255, 255, 255, 255, 255, 255,
+	0, 3,
+}
+
+func TestDeleteRecordsResponse(t *testing.T) {
+	resp := &DeleteRecordsResponse{
+		Version:      0,
+		ThrottleTime: 100 * time.Millisecond,
+		Topics: map[string]*DeleteRecordsResponseTopic{
+			"topic": {
+				Partitions: map[int32]*DeleteRecordsResponsePartition{
+					19: {LowWatermark: 200, Err: 0},
+					20: {LowWatermark: -1, Err: 3},
+				},
+			},
+			"other": {},
+		},
+	}
+
	testResponse(t, "", resp, deleteRecordsResponse)
+}
11 vendor/github.com/Shopify/sarama/delete_topics_request.go generated vendored
@@ -3,6 +3,7 @@ package sarama
 import "time"
 
 type DeleteTopicsRequest struct {
+	Version int16
 	Topics  []string
 	Timeout time.Duration
 }
@@ -25,6 +26,7 @@ func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error
 		return err
 	}
 	d.Timeout = time.Duration(timeout) * time.Millisecond
+	d.Version = version
 	return nil
 }
 
@@ -33,9 +35,14 @@ func (d *DeleteTopicsRequest) key() int16 {
 }
 
 func (d *DeleteTopicsRequest) version() int16 {
-	return 0
+	return d.Version
 }
 
 func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
-	return V0_10_1_0
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
 }
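The hunk above is the pattern this update applies to several request types: the struct now carries a Version field, version() reports it, and requiredVersion() maps it to the broker release that introduced that wire version. A hedged sketch of the effect, using only the fields shown above:

package main

import (
	"fmt"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	// Setting Version is what changes behaviour: requiredVersion() maps
	// 1 -> V0_11_0_0, so a client configured for an older broker should
	// refuse to send a v1 DeleteTopicsRequest.
	req := &sarama.DeleteTopicsRequest{
		Version: 1,
		Topics:  []string{"old-topic"},
		Timeout: 100 * time.Millisecond,
	}
	fmt.Println(req.Version) // 1
}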
13 vendor/github.com/Shopify/sarama/delete_topics_request_test.go generated vendored
@@ -12,8 +12,19 @@ var deleteTopicsRequest = []byte{
 	0, 0, 0, 100,
 }
 
-func TestDeleteTopicsRequest(t *testing.T) {
+func TestDeleteTopicsRequestV0(t *testing.T) {
 	req := &DeleteTopicsRequest{
+		Version: 0,
 		Topics:  []string{"topic", "other"},
 		Timeout: 100 * time.Millisecond,
 	}
 
 	testRequest(t, "", req, deleteTopicsRequest)
 }
+
+func TestDeleteTopicsRequestV1(t *testing.T) {
+	req := &DeleteTopicsRequest{
+		Version: 1,
+		Topics:  []string{"topic", "other"},
+		Timeout: 100 * time.Millisecond,
+	}
8 vendor/github.com/Shopify/sarama/errors.go generated vendored
@@ -41,6 +41,14 @@ var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetc
 // a RecordBatch.
 var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
 
+// ErrControllerNotAvailable is returned when the server did not give a correct controller id.
+// This may mean the Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
 // PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
 // if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
 type PacketEncodingError struct {
2 vendor/github.com/Shopify/sarama/fetch_request.go generated vendored
@@ -149,7 +149,7 @@ func (r *FetchRequest) requiredVersion() KafkaVersion {
 	case 4:
 		return V0_11_0_0
 	default:
-		return minVersion
+		return MinVersion
 	}
 }
 
8 vendor/github.com/Shopify/sarama/fetch_response.go generated vendored
@@ -280,7 +280,7 @@ func (r *FetchResponse) requiredVersion() KafkaVersion {
 	case 4:
 		return V0_11_0_0
 	default:
-		return minVersion
+		return MinVersion
 	}
 }
 
@@ -353,7 +353,7 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
 		records := newLegacyRecords(&MessageSet{})
 		frb.RecordsSet = []*Records{&records}
 	}
-	set := frb.RecordsSet[0].msgSet
+	set := frb.RecordsSet[0].MsgSet
 	set.Messages = append(set.Messages, msgBlock)
 }
 
@@ -365,7 +365,7 @@ func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Enco
 		records := newDefaultRecords(&RecordBatch{Version: 2})
 		frb.RecordsSet = []*Records{&records}
 	}
-	batch := frb.RecordsSet[0].recordBatch
+	batch := frb.RecordsSet[0].RecordBatch
 	batch.addRecord(rec)
 }
 
@@ -375,7 +375,7 @@ func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset
 		records := newDefaultRecords(&RecordBatch{Version: 2})
 		frb.RecordsSet = []*Records{&records}
 	}
-	batch := frb.RecordsSet[0].recordBatch
+	batch := frb.RecordsSet[0].RecordBatch
 	batch.LastOffsetDelta = offset
 }
 
6 vendor/github.com/Shopify/sarama/fetch_response_test.go generated vendored
@@ -132,7 +132,7 @@ func TestOneMessageFetchResponse(t *testing.T) {
 	if n != 1 {
 		t.Fatal("Decoding produced incorrect number of messages.")
 	}
-	msgBlock := block.RecordsSet[0].msgSet.Messages[0]
+	msgBlock := block.RecordsSet[0].MsgSet.Messages[0]
 	if msgBlock.Offset != 0x550000 {
 		t.Error("Decoding produced incorrect message offset.")
 	}
@@ -185,7 +185,7 @@ func TestOneRecordFetchResponse(t *testing.T) {
 	if n != 1 {
 		t.Fatal("Decoding produced incorrect number of records.")
 	}
-	rec := block.RecordsSet[0].recordBatch.Records[0]
+	rec := block.RecordsSet[0].RecordBatch.Records[0]
 	if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) {
 		t.Error("Decoding produced incorrect record key.")
 	}
@@ -231,7 +231,7 @@ func TestOneMessageFetchResponseV4(t *testing.T) {
 	if n != 1 {
 		t.Fatal("Decoding produced incorrect number of records.")
 	}
-	msgBlock := block.RecordsSet[0].msgSet.Messages[0]
+	msgBlock := block.RecordsSet[0].MsgSet.Messages[0]
 	if msgBlock.Offset != 0x550000 {
 		t.Error("Decoding produced incorrect message offset.")
 	}
61 vendor/github.com/Shopify/sarama/find_coordinator_request.go generated vendored Normal file
@@ -0,0 +1,61 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+	CoordinatorGroup       CoordinatorType = 0
+	CoordinatorTransaction CoordinatorType = 1
+)
+
+type FindCoordinatorRequest struct {
+	Version         int16
+	CoordinatorKey  string
+	CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(f.CoordinatorKey); err != nil {
+		return err
+	}
+
+	if f.Version >= 1 {
+		pe.putInt8(int8(f.CoordinatorType))
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+	if f.CoordinatorKey, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		f.Version = version
+		coordinatorType, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		f.CoordinatorType = CoordinatorType(coordinatorType)
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
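A hypothetical sketch of the new request type: v0 only ever located group coordinators, while v1 adds a coordinator-type byte so the same request can also locate transaction coordinators. Only the fields and constants defined in the file above are assumed.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// For a v0 request the CoordinatorType byte is simply not encoded;
	// setting Version to 1 activates it.
	req := &sarama.FindCoordinatorRequest{
		Version:         1,
		CoordinatorKey:  "my-group",
		CoordinatorType: sarama.CoordinatorGroup,
	}
	fmt.Println(req.CoordinatorKey) // my-group
}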
33 vendor/github.com/Shopify/sarama/find_coordinator_request_test.go generated vendored Normal file
@@ -0,0 +1,33 @@
+package sarama
+
+import "testing"
+
+var (
+	findCoordinatorRequestConsumerGroup = []byte{
+		0, 5, 'g', 'r', 'o', 'u', 'p',
+		0,
+	}
+
+	findCoordinatorRequestTransaction = []byte{
+		0, 13, 't', 'r', 'a', 'n', 's', 'a', 'c', 't', 'i', 'o', 'n', 'i', 'd',
+		1,
+	}
+)
+
+func TestFindCoordinatorRequest(t *testing.T) {
+	req := &FindCoordinatorRequest{
+		Version:         1,
+		CoordinatorKey:  "group",
+		CoordinatorType: CoordinatorGroup,
+	}
+
+	testRequest(t, "version 1 - group", req, findCoordinatorRequestConsumerGroup)
+
+	req = &FindCoordinatorRequest{
+		Version:         1,
+		CoordinatorKey:  "transactionid",
+		CoordinatorType: CoordinatorTransaction,
+	}
+
+	testRequest(t, "version 1 - transaction", req, findCoordinatorRequestTransaction)
+}
92 vendor/github.com/Shopify/sarama/find_coordinator_response.go generated vendored Normal file
@@ -0,0 +1,92 @@
+package sarama
+
+import (
+	"time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	Coordinator  *Broker
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		f.Version = version
+
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(tmp)
+
+	if version >= 1 {
+		if f.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	coordinator := new(Broker)
+	// The version is hardcoded to 0, as version 1 of the Broker-decode
+	// contains the rack-field which is not present in the FindCoordinatorResponse.
+	if err := coordinator.decode(pd, 0); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	f.Coordinator = coordinator
+
+	return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+	if f.Version >= 1 {
+		pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
+	}
+
+	pe.putInt16(int16(f.Err))
+
+	if f.Version >= 1 {
+		if err := pe.putNullableString(f.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	coordinator := f.Coordinator
+	if coordinator == nil {
+		coordinator = NoNode
+	}
+	if err := coordinator.encode(pe, 0); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
83 vendor/github.com/Shopify/sarama/find_coordinator_response_test.go generated vendored Normal file
@@ -0,0 +1,83 @@
+package sarama
+
+import (
+	"testing"
+	"time"
+)
+
+func TestFindCoordinatorResponse(t *testing.T) {
+	errMsg := "kaboom"
+
+	for _, tc := range []struct {
+		desc     string
+		response *FindCoordinatorResponse
+		encoded  []byte
+	}{{
+		desc: "version 0 - no error",
+		response: &FindCoordinatorResponse{
+			Version: 0,
+			Err:     ErrNoError,
+			Coordinator: &Broker{
+				id:   7,
+				addr: "host:9092",
+			},
+		},
+		encoded: []byte{
+			0, 0, // Err
+			0, 0, 0, 7, // Coordinator.ID
+			0, 4, 'h', 'o', 's', 't', // Coordinator.Host
+			0, 0, 35, 132, // Coordinator.Port
+		},
+	}, {
+		desc: "version 1 - no error",
+		response: &FindCoordinatorResponse{
+			Version:      1,
+			ThrottleTime: 100 * time.Millisecond,
+			Err:          ErrNoError,
+			Coordinator: &Broker{
+				id:   7,
+				addr: "host:9092",
+			},
+		},
+		encoded: []byte{
+			0, 0, 0, 100, // ThrottleTime
+			0, 0, // Err
+			255, 255, // ErrMsg: empty
+			0, 0, 0, 7, // Coordinator.ID
+			0, 4, 'h', 'o', 's', 't', // Coordinator.Host
+			0, 0, 35, 132, // Coordinator.Port
+		},
+	}, {
+		desc: "version 0 - error",
+		response: &FindCoordinatorResponse{
+			Version:     0,
+			Err:         ErrConsumerCoordinatorNotAvailable,
+			Coordinator: NoNode,
+		},
+		encoded: []byte{
+			0, 15, // Err
+			255, 255, 255, 255, // Coordinator.ID: -1
+			0, 0, // Coordinator.Host: ""
+			255, 255, 255, 255, // Coordinator.Port: -1
+		},
+	}, {
+		desc: "version 1 - error",
+		response: &FindCoordinatorResponse{
+			Version:      1,
+			ThrottleTime: 100 * time.Millisecond,
+			Err:          ErrConsumerCoordinatorNotAvailable,
+			ErrMsg:       &errMsg,
+			Coordinator:  NoNode,
+		},
+		encoded: []byte{
+			0, 0, 0, 100, // ThrottleTime
+			0, 15, // Err
+			0, 6, 'k', 'a', 'b', 'o', 'o', 'm', // ErrMsg
+			255, 255, 255, 255, // Coordinator.ID: -1
+			0, 0, // Coordinator.Host: ""
+			255, 255, 255, 255, // Coordinator.Port: -1
+		},
+	}} {
+		testResponse(t, tc.desc, tc.response, tc.encoded)
+	}
+}
167 vendor/github.com/Shopify/sarama/functional_consumer_test.go generated vendored
@@ -1,8 +1,13 @@
 package sarama
 
 import (
+	"fmt"
+	"math"
 	"os"
+	"sort"
+	"sync"
 	"testing"
 	"time"
 )
@@ -46,7 +51,7 @@ func TestConsumerHighWaterMarkOffset(t *testing.T) {
 	}
 	defer safeClose(t, c)
 
-	pc, err := c.ConsumePartition("test.1", 0, OffsetOldest)
+	pc, err := c.ConsumePartition("test.1", 0, offset)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -59,3 +64,163 @@ func TestConsumerHighWaterMarkOffset(t *testing.T) {
 
 	safeClose(t, pc)
 }
+
+// Makes sure that messages produced by all supported client versions/
+// compression codecs (except LZ4) combinations can be consumed by all
+// supported consumer versions. It relies on the KAFKA_VERSION environment
+// variable to provide the version of the test Kafka cluster.
+//
+// Note that LZ4 codec was introduced in v0.10.0.0 and therefore is excluded
+// from this test case. It has a similar version matrix test case below that
+// only checks versions from v0.10.0.0 until KAFKA_VERSION.
+func TestVersionMatrix(t *testing.T) {
+	setupFunctionalTest(t)
+	defer teardownFunctionalTest(t)
+
+	// Produce lots of messages with all possible combinations of supported
+	// protocol versions and compressions, except for LZ4.
+	testVersions := versionRange(V0_8_2_0)
+	allCodecsButLZ4 := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy}
+	producedMessages := produceMsgs(t, testVersions, allCodecsButLZ4, 17, 100)
+
+	// When/Then
+	consumeMsgs(t, testVersions, producedMessages)
+}
+
+// Support for LZ4 codec was introduced in v0.10.0.0 so a version matrix to
+// test LZ4 should start with v0.10.0.0.
+func TestVersionMatrixLZ4(t *testing.T) {
+	setupFunctionalTest(t)
+	defer teardownFunctionalTest(t)
+
+	// Produce lots of messages with all possible combinations of supported
+	// protocol versions starting with v0.10 (first where LZ4 was supported)
+	// and all possible compressions.
+	testVersions := versionRange(V0_10_0_0)
+	allCodecs := []CompressionCodec{CompressionNone, CompressionGZIP, CompressionSnappy, CompressionLZ4}
+	producedMessages := produceMsgs(t, testVersions, allCodecs, 17, 100)
+
+	// When/Then
+	consumeMsgs(t, testVersions, producedMessages)
+}
+
+func prodMsg2Str(prodMsg *ProducerMessage) string {
+	return fmt.Sprintf("{offset: %d, value: %s}", prodMsg.Offset, string(prodMsg.Value.(StringEncoder)))
+}
+
+func consMsg2Str(consMsg *ConsumerMessage) string {
+	return fmt.Sprintf("{offset: %d, value: %s}", consMsg.Offset, string(consMsg.Value))
+}
+
+func versionRange(lower KafkaVersion) []KafkaVersion {
+	// Get the test cluster version from the environment. If there is nothing
+	// there then assume the highest.
+	upper, err := ParseKafkaVersion(os.Getenv("KAFKA_VERSION"))
+	if err != nil {
+		upper = MaxVersion
+	}
+
+	versions := make([]KafkaVersion, 0, len(SupportedVersions))
+	for _, v := range SupportedVersions {
+		if !v.IsAtLeast(lower) {
+			continue
+		}
+		if !upper.IsAtLeast(v) {
+			return versions
+		}
+		versions = append(versions, v)
+	}
+	return versions
+}
+
+func produceMsgs(t *testing.T, clientVersions []KafkaVersion, codecs []CompressionCodec, flush int, countPerVerCodec int) []*ProducerMessage {
+	var wg sync.WaitGroup
+	var producedMessagesMu sync.Mutex
+	var producedMessages []*ProducerMessage
+	for _, prodVer := range clientVersions {
+		for _, codec := range codecs {
+			prodCfg := NewConfig()
+			prodCfg.Version = prodVer
+			prodCfg.Producer.Return.Successes = true
+			prodCfg.Producer.Return.Errors = true
+			prodCfg.Producer.Flush.MaxMessages = flush
+			prodCfg.Producer.Compression = codec
+
+			p, err := NewSyncProducer(kafkaBrokers, prodCfg)
+			if err != nil {
+				t.Errorf("Failed to create producer: version=%s, compression=%s, err=%v", prodVer, codec, err)
+				continue
+			}
+			defer safeClose(t, p)
+			for i := 0; i < countPerVerCodec; i++ {
+				msg := &ProducerMessage{
+					Topic: "test.1",
+					Value: StringEncoder(fmt.Sprintf("msg:%s:%s:%d", prodVer, codec, i)),
+				}
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+					_, _, err := p.SendMessage(msg)
+					if err != nil {
+						t.Errorf("Failed to produce message: %s, err=%v", msg.Value, err)
+					}
+					producedMessagesMu.Lock()
+					producedMessages = append(producedMessages, msg)
+					producedMessagesMu.Unlock()
+				}()
+			}
+		}
+	}
+	wg.Wait()
+
+	// Sort produced messages in ascending offset order.
+	sort.Slice(producedMessages, func(i, j int) bool {
+		return producedMessages[i].Offset < producedMessages[j].Offset
+	})
+	t.Logf("*** Total produced %d, firstOffset=%d, lastOffset=%d\n",
+		len(producedMessages), producedMessages[0].Offset, producedMessages[len(producedMessages)-1].Offset)
+	return producedMessages
+}
+
+func consumeMsgs(t *testing.T, clientVersions []KafkaVersion, producedMessages []*ProducerMessage) {
+	// Consume all produced messages with all client versions supported by the
+	// cluster.
+consumerVersionLoop:
+	for _, consVer := range clientVersions {
+		t.Logf("*** Consuming with client version %s\n", consVer)
+		// Create a partition consumer that should start from the first produced
+		// message.
+		consCfg := NewConfig()
+		consCfg.Version = consVer
+		c, err := NewConsumer(kafkaBrokers, consCfg)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer safeClose(t, c)
+		pc, err := c.ConsumePartition("test.1", 0, producedMessages[0].Offset)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer safeClose(t, pc)
+
+		// Consume as many messages as there have been produced and make sure that
+		// order is preserved.
+		for i, prodMsg := range producedMessages {
+			select {
+			case consMsg := <-pc.Messages():
+				if consMsg.Offset != prodMsg.Offset {
+					t.Errorf("Consumed unexpected offset: version=%s, index=%d, want=%s, got=%s",
+						consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg))
+					continue consumerVersionLoop
+				}
+				if string(consMsg.Value) != string(prodMsg.Value.(StringEncoder)) {
+					t.Errorf("Consumed unexpected msg: version=%s, index=%d, want=%s, got=%s",
+						consVer, i, prodMsg2Str(prodMsg), consMsg2Str(consMsg))
+					continue consumerVersionLoop
+				}
+			case <-time.After(3 * time.Second):
+				t.Fatalf("Timeout waiting for: index=%d, offset=%d, msg=%s", i, prodMsg.Offset, prodMsg.Value)
+			}
+		}
+	}
+}
24 vendor/github.com/Shopify/sarama/join_group_request.go generated vendored
@@ -25,8 +25,10 @@ func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
 }
 
 type JoinGroupRequest struct {
+	Version          int16
 	GroupId          string
 	SessionTimeout   int32
+	RebalanceTimeout int32
 	MemberId         string
 	ProtocolType     string
 	GroupProtocols   map[string][]byte // deprecated; use OrderedGroupProtocols
@@ -38,6 +40,9 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
 		return err
 	}
 	pe.putInt32(r.SessionTimeout)
+	if r.Version >= 1 {
+		pe.putInt32(r.RebalanceTimeout)
+	}
 	if err := pe.putString(r.MemberId); err != nil {
 		return err
 	}
@@ -76,6 +81,8 @@ func (r *JoinGroupRequest) encode(pe packetEncoder) error {
 }
 
 func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
 	if r.GroupId, err = pd.getString(); err != nil {
 		return
 	}
@@ -84,6 +91,12 @@ func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
 		return
 	}
 
+	if version >= 1 {
+		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
 	if r.MemberId, err = pd.getString(); err != nil {
 		return
 	}
@@ -118,11 +131,18 @@ func (r *JoinGroupRequest) key() int16 {
 }
 
 func (r *JoinGroupRequest) version() int16 {
-	return 0
+	return r.Version
 }
 
 func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
 }
 
 func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
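A minimal, hypothetical sketch of the new v1 field: with Version >= 1 the encoder writes RebalanceTimeout right after SessionTimeout, while on v0 the field is silently skipped, so setting Version is what actually activates it. Only fields and methods shown in the hunks above are assumed.

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := new(sarama.JoinGroupRequest)
	req.Version = 1 // v1 requires brokers at V0_10_1_0 or newer
	req.GroupId = "my-group"
	req.SessionTimeout = 30000
	req.RebalanceTimeout = 60000 // only encoded because Version >= 1
	req.AddGroupProtocol("range", nil)
	fmt.Println(req.RebalanceTimeout) // 60000
}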
42 vendor/github.com/Shopify/sarama/join_group_request_test.go generated vendored
@@ -3,7 +3,7 @@ package sarama
 import "testing"
 
 var (
-	joinGroupRequestNoProtocols = []byte{
+	joinGroupRequestV0_NoProtocols = []byte{
 		0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
 		0, 0, 0, 100, // Session timeout
 		0, 0, // Member ID
@@ -11,7 +11,7 @@ var (
 		0, 0, 0, 0, // 0 protocol groups
 	}
 
-	joinGroupRequestOneProtocol = []byte{
+	joinGroupRequestV0_OneProtocol = []byte{
 		0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
 		0, 0, 0, 100, // Session timeout
 		0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
@@ -20,6 +20,17 @@ var (
 		0, 3, 'o', 'n', 'e', // Protocol name
 		0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
 	}
+
+	joinGroupRequestV1 = []byte{
+		0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID
+		0, 0, 0, 100, // Session timeout
+		0, 0, 0, 200, // Rebalance timeout
+		0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID
+		0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type
+		0, 0, 0, 1, // 1 group protocol
+		0, 3, 'o', 'n', 'e', // Protocol name
+		0, 0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata
+	}
 )
 
 func TestJoinGroupRequest(t *testing.T) {
@@ -27,20 +38,20 @@ func TestJoinGroupRequest(t *testing.T) {
 	request.GroupId = "TestGroup"
 	request.SessionTimeout = 100
 	request.ProtocolType = "consumer"
-	testRequest(t, "no protocols", request, joinGroupRequestNoProtocols)
+	testRequest(t, "V0: no protocols", request, joinGroupRequestV0_NoProtocols)
 }
 
-func TestJoinGroupRequestOneProtocol(t *testing.T) {
+func TestJoinGroupRequestV0_OneProtocol(t *testing.T) {
 	request := new(JoinGroupRequest)
 	request.GroupId = "TestGroup"
 	request.SessionTimeout = 100
 	request.MemberId = "OneProtocol"
 	request.ProtocolType = "consumer"
 	request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
-	packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
+	packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol)
 	request.GroupProtocols = make(map[string][]byte)
 	request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
-	testRequestDecode(t, "one protocol", request, packet)
+	testRequestDecode(t, "V0: one protocol", request, packet)
 }
 
 func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
@@ -51,7 +62,22 @@ func TestJoinGroupRequestDeprecatedEncode(t *testing.T) {
 	request.ProtocolType = "consumer"
 	request.GroupProtocols = make(map[string][]byte)
 	request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
-	packet := testRequestEncode(t, "one protocol", request, joinGroupRequestOneProtocol)
+	packet := testRequestEncode(t, "V0: one protocol", request, joinGroupRequestV0_OneProtocol)
 	request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
-	testRequestDecode(t, "one protocol", request, packet)
+	testRequestDecode(t, "V0: one protocol", request, packet)
 }
+
+func TestJoinGroupRequestV1(t *testing.T) {
+	request := new(JoinGroupRequest)
+	request.Version = 1
+	request.GroupId = "TestGroup"
+	request.SessionTimeout = 100
+	request.RebalanceTimeout = 200
+	request.MemberId = "OneProtocol"
+	request.ProtocolType = "consumer"
+	request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03})
+	packet := testRequestEncode(t, "V1", request, joinGroupRequestV1)
+	request.GroupProtocols = make(map[string][]byte)
+	request.GroupProtocols["one"] = []byte{0x01, 0x02, 0x03}
+	testRequestDecode(t, "V1", request, packet)
+}
24 vendor/github.com/Shopify/sarama/join_group_response.go generated vendored
@@ -1,6 +1,8 @@
 package sarama
 
 type JoinGroupResponse struct {
+	Version       int16
+	ThrottleTime  int32
 	Err           KError
 	GenerationId  int32
 	GroupProtocol string
@@ -22,6 +24,9 @@ func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata
 }
 
 func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTime)
+	}
 	pe.putInt16(int16(r.Err))
 	pe.putInt32(r.GenerationId)
 
@@ -53,6 +58,14 @@ func (r *JoinGroupResponse) encode(pe packetEncoder) error {
 }
 
 func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 2 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return
+		}
+	}
+
 	kerr, err := pd.getInt16()
 	if err != nil {
 		return err
@@ -107,9 +120,16 @@ func (r *JoinGroupResponse) key() int16 {
 }
 
 func (r *JoinGroupResponse) version() int16 {
-	return 0
+	return r.Version
 }
 
 func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
 }
88 vendor/github.com/Shopify/sarama/join_group_response_test.go generated vendored
@@ -6,7 +6,7 @@ import (
 )
 
 var (
-	joinGroupResponseNoError = []byte{
+	joinGroupResponseV0_NoError = []byte{
 		0x00, 0x00, // No error
 		0x00, 0x01, 0x02, 0x03, // Generation ID
 		0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
@@ -15,7 +15,7 @@ var (
 		0, 0, 0, 0, // No member info
 	}
 
-	joinGroupResponseWithError = []byte{
+	joinGroupResponseV0_WithError = []byte{
 		0, 23, // Error: inconsistent group protocol
 		0x00, 0x00, 0x00, 0x00, // Generation ID
 		0, 0, // Protocol name chosen
@@ -24,7 +24,7 @@ var (
 		0, 0, 0, 0, // No member info
 	}
 
-	joinGroupResponseLeader = []byte{
+	joinGroupResponseV0_Leader = []byte{
 		0x00, 0x00, // No error
 		0x00, 0x01, 0x02, 0x03, // Generation ID
 		0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
@@ -34,13 +34,32 @@ var (
 		0, 3, 'f', 'o', 'o', // Member ID
 		0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata
 	}
+
+	joinGroupResponseV1 = []byte{
+		0x00, 0x00, // No error
+		0x00, 0x01, 0x02, 0x03, // Generation ID
+		0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
+		0, 3, 'f', 'o', 'o', // Leader ID
+		0, 3, 'b', 'a', 'r', // Member ID
+		0, 0, 0, 0, // No member info
+	}
+
+	joinGroupResponseV2 = []byte{
+		0, 0, 0, 100,
+		0x00, 0x00, // No error
+		0x00, 0x01, 0x02, 0x03, // Generation ID
+		0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen
+		0, 3, 'f', 'o', 'o', // Leader ID
+		0, 3, 'b', 'a', 'r', // Member ID
+		0, 0, 0, 0, // No member info
+	}
 )
 
-func TestJoinGroupResponse(t *testing.T) {
+func TestJoinGroupResponseV0(t *testing.T) {
 	var response *JoinGroupResponse
 
 	response = new(JoinGroupResponse)
-	testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0)
+	testVersionDecodable(t, "no error", response, joinGroupResponseV0_NoError, 0)
 	if response.Err != ErrNoError {
 		t.Error("Decoding Err failed: no error expected but found", response.Err)
 	}
@@ -58,7 +77,7 @@ func TestJoinGroupResponse(t *testing.T) {
 	}
 
 	response = new(JoinGroupResponse)
-	testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0)
+	testVersionDecodable(t, "with error", response, joinGroupResponseV0_WithError, 0)
 	if response.Err != ErrInconsistentGroupProtocol {
 		t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err)
 	}
@@ -76,7 +95,7 @@ func TestJoinGroupResponse(t *testing.T) {
 	}
 
 	response = new(JoinGroupResponse)
-	testVersionDecodable(t, "with error", response, joinGroupResponseLeader, 0)
+	testVersionDecodable(t, "with error", response, joinGroupResponseV0_Leader, 0)
 	if response.Err != ErrNoError {
 		t.Error("Decoding Err failed: ErrNoError expected but found", response.Err)
 	}
@@ -96,3 +115,58 @@ func TestJoinGroupResponse(t *testing.T) {
 		t.Error("Decoding foo member failed, found:", response.Members["foo"])
 	}
 }
+
+func TestJoinGroupResponseV1(t *testing.T) {
+	response := new(JoinGroupResponse)
+	testVersionDecodable(t, "no error", response, joinGroupResponseV1, 1)
+	if response.Err != ErrNoError {
+		t.Error("Decoding Err failed: no error expected but found", response.Err)
+	}
+	if response.GenerationId != 66051 {
+		t.Error("Decoding GenerationId failed, found:", response.GenerationId)
+	}
+	if response.GroupProtocol != "protocol" {
+		t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol)
+	}
+	if response.LeaderId != "foo" {
+		t.Error("Decoding LeaderId failed, found:", response.LeaderId)
+	}
+	if response.MemberId != "bar" {
+		t.Error("Decoding MemberId failed, found:", response.MemberId)
+	}
+	if response.Version != 1 {
+		t.Error("Decoding Version failed, found:", response.Version)
+	}
+	if len(response.Members) != 0 {
+		t.Error("Decoding Members failed, found:", response.Members)
+	}
+}
+
+func TestJoinGroupResponseV2(t *testing.T) {
+	response := new(JoinGroupResponse)
+	testVersionDecodable(t, "no error", response, joinGroupResponseV2, 2)
+	if response.ThrottleTime != 100 {
+		t.Error("Decoding ThrottleTime failed, found:", response.ThrottleTime)
+	}
+	if response.Err != ErrNoError {
+		t.Error("Decoding Err failed: no error expected but found", response.Err)
+	}
+	if response.GenerationId != 66051 {
+		t.Error("Decoding GenerationId failed, found:", response.GenerationId)
+	}
+	if response.GroupProtocol != "protocol" {
+		t.Error("Decoding GroupProtocol failed, found:", response.GroupProtocol)
+	}
+	if response.LeaderId != "foo" {
+		t.Error("Decoding LeaderId failed, found:", response.LeaderId)
+	}
+	if response.MemberId != "bar" {
+		t.Error("Decoding MemberId failed, found:", response.MemberId)
+	}
+	if response.Version != 2 {
+		t.Error("Decoding Version failed, found:", response.Version)
+	}
+	if len(response.Members) != 0 {
+		t.Error("Decoding Members failed, found:", response.Members)
+	}
+}
37 vendor/github.com/Shopify/sarama/message.go generated vendored
@@ -24,13 +24,28 @@ const (
 	CompressionLZ4    CompressionCodec = 3
 )
 
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+	}[int(cc)]
+}
+
+// CompressionLevelDefault is the constant to use in CompressionLevel
+// to have the default compression level for any codec. The value is chosen
+// so that it does not collide with any existing compression level.
+const CompressionLevelDefault = -1000
+
 type Message struct {
-	Codec CompressionCodec // codec used to compress the message contents
-	Key   []byte           // the message key, may be nil
-	Value []byte           // the message contents
-	Set   *MessageSet      // the message set a message might wrap
-	Version   int8         // v1 requires Kafka 0.10
-	Timestamp time.Time    // the timestamp of the message (version 1+ only)
+	Codec            CompressionCodec // codec used to compress the message contents
+	CompressionLevel int              // compression level
+	Key              []byte           // the message key, may be nil
+	Value            []byte           // the message contents
+	Set              *MessageSet      // the message set a message might wrap
+	Version          int8             // v1 requires Kafka 0.10
+	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
 
 	compressedCache []byte
 	compressedSize  int // used for computing the compression ratio metrics
@@ -66,7 +81,15 @@ func (m *Message) encode(pe packetEncoder) error {
 		payload = m.Value
 	case CompressionGZIP:
 		var buf bytes.Buffer
-		writer := gzip.NewWriter(&buf)
+		var writer *gzip.Writer
+		if m.CompressionLevel != CompressionLevelDefault {
+			writer, err = gzip.NewWriterLevel(&buf, m.CompressionLevel)
+			if err != nil {
+				return err
+			}
+		} else {
+			writer = gzip.NewWriter(&buf)
+		}
 		if _, err = writer.Write(m.Value); err != nil {
 			return err
 		}
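A self-contained sketch that mirrors the new encode branch above: a sentinel level (CompressionLevelDefault, -1000) keeps gzip.NewWriter's default behaviour, while any other value goes through gzip.NewWriterLevel, which validates the level and can return an error. This is stdlib-only; the function name is hypothetical.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"log"
)

// compressionLevelDefault mirrors sarama's CompressionLevelDefault sentinel.
const compressionLevelDefault = -1000

func gzipCompress(value []byte, level int) ([]byte, error) {
	var (
		buf    bytes.Buffer
		writer *gzip.Writer
		err    error
	)
	if level != compressionLevelDefault {
		// NewWriterLevel rejects invalid levels, hence the error path.
		if writer, err = gzip.NewWriterLevel(&buf, level); err != nil {
			return nil, err
		}
	} else {
		writer = gzip.NewWriter(&buf)
	}
	if _, err = writer.Write(value); err != nil {
		return nil, err
	}
	// Close flushes the remaining compressed bytes into buf.
	if err = writer.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := gzipCompress([]byte("hello"), gzip.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out))
}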
19 vendor/github.com/Shopify/sarama/message_test.go generated vendored
@@ -1,8 +1,6 @@
 package sarama
 
 import (
-	"runtime"
-	"strings"
 	"testing"
 	"time"
 )
@@ -31,17 +29,6 @@ var (
 		0xFF, 0xFF, 0xFF, 0xFF} // value
 
-	emptyGzipMessage = []byte{
-		97, 79, 149, 90, //CRC
-		0x00, // magic version byte
-		0x01, // attribute flags
-		0xFF, 0xFF, 0xFF, 0xFF, // key
-		// value
-		0x00, 0x00, 0x00, 0x17,
-		0x1f, 0x8b,
-		0x08,
-		0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}
-
-	emptyGzipMessage18 = []byte{
+	emptyGzipMessage = []byte{
 		132, 99, 80, 148, //CRC
 		0x00, // magic version byte
 		0x01, // attribute flags
@@ -107,11 +94,7 @@ func TestMessageEncoding(t *testing.T) {
 
 	message.Value = []byte{}
 	message.Codec = CompressionGZIP
-	if strings.HasPrefix(runtime.Version(), "go1.8") || strings.HasPrefix(runtime.Version(), "go1.9") {
-		testEncodable(t, "empty gzip", &message, emptyGzipMessage18)
-	} else {
-		testEncodable(t, "empty gzip", &message, emptyGzipMessage)
-	}
+	testEncodable(t, "empty gzip", &message, emptyGzipMessage)
 
 	message.Value = []byte{}
 	message.Codec = CompressionLZ4
68
vendor/github.com/Shopify/sarama/metadata_request.go
generated
vendored
68
vendor/github.com/Shopify/sarama/metadata_request.go
generated
vendored
|
@@ -1,40 +1,65 @@
package sarama

type MetadataRequest struct {
    Topics []string
    Version                int16
    Topics                 []string
    AllowAutoTopicCreation bool
}

func (r *MetadataRequest) encode(pe packetEncoder) error {
    err := pe.putArrayLength(len(r.Topics))
    if err != nil {
        return err
    if r.Version < 0 || r.Version > 5 {
        return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
    }

    for i := range r.Topics {
        err = pe.putString(r.Topics[i])
    if r.Version == 0 || r.Topics != nil || len(r.Topics) > 0 {
        err := pe.putArrayLength(len(r.Topics))
        if err != nil {
            return err
        }

        for i := range r.Topics {
            err = pe.putString(r.Topics[i])
            if err != nil {
                return err
            }
        }
    } else {
        pe.putInt32(-1)
    }
    if r.Version > 3 {
        pe.putBool(r.AllowAutoTopicCreation)
    }
    return nil
}

func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
    topicCount, err := pd.getArrayLength()
    r.Version = version
    size, err := pd.getInt32()
    if err != nil {
        return err
    }
    if topicCount == 0 {
    if size < 0 {
        return nil
    }
    } else {
    topicCount := size
    if topicCount == 0 {
        return nil
    }

    r.Topics = make([]string, topicCount)
    for i := range r.Topics {
        topic, err := pd.getString()
    r.Topics = make([]string, topicCount)
    for i := range r.Topics {
        topic, err := pd.getString()
        if err != nil {
            return err
        }
        r.Topics[i] = topic
    }
    }
    if r.Version > 3 {
        autoCreation, err := pd.getBool()
        if err != nil {
            return err
        }
        r.Topics[i] = topic
        r.AllowAutoTopicCreation = autoCreation
    }
    return nil
}

@@ -44,9 +69,20 @@ func (r *MetadataRequest) key() int16 {
}

func (r *MetadataRequest) version() int16 {
    return 0
    return r.Version
}

func (r *MetadataRequest) requiredVersion() KafkaVersion {
    return minVersion
    switch r.Version {
    case 1:
        return V0_10_0_0
    case 2:
        return V0_10_1_0
    case 3, 4:
        return V0_11_0_0
    case 5:
        return V1_0_0_0
    default:
        return MinVersion
    }
}
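The encode path above is version-gated: a nil Topics slice is written as a -1 array length on v1 and later (meaning "all topics"), and AllowAutoTopicCreation is only encoded for Version > 3. A minimal sketch of driving this through the exported API, assuming the vendored sarama as above; the broker address and topic choice are placeholders:

package main

import (
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    broker := sarama.NewBroker("localhost:9092") // placeholder address
    conf := sarama.NewConfig()
    conf.Version = sarama.V1_0_0_0
    if err := broker.Open(conf); err != nil {
        log.Fatal(err)
    }
    defer broker.Close()

    // Version 4 adds AllowAutoTopicCreation; nil Topics is encoded as a
    // -1 array length on v1+, asking the broker for every topic.
    req := &sarama.MetadataRequest{Version: 4, Topics: nil, AllowAutoTopicCreation: false}
    resp, err := broker.GetMetadata(req)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("controller=%d topics=%d", resp.ControllerID, len(resp.Topics))
}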
61 vendor/github.com/Shopify/sarama/metadata_request_test.go (generated, vendored)
@@ -3,27 +3,74 @@ package sarama
import "testing"

var (
    metadataRequestNoTopics = []byte{
    metadataRequestNoTopicsV0 = []byte{
        0x00, 0x00, 0x00, 0x00}

    metadataRequestOneTopic = []byte{
    metadataRequestOneTopicV0 = []byte{
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'}

    metadataRequestThreeTopics = []byte{
    metadataRequestThreeTopicsV0 = []byte{
        0x00, 0x00, 0x00, 0x03,
        0x00, 0x03, 'f', 'o', 'o',
        0x00, 0x03, 'b', 'a', 'r',
        0x00, 0x03, 'b', 'a', 'z'}

    metadataRequestNoTopicsV1 = []byte{
        0xff, 0xff, 0xff, 0xff}

    metadataRequestAutoCreateV4   = append(metadataRequestOneTopicV0, byte(1))
    metadataRequestNoAutoCreateV4 = append(metadataRequestOneTopicV0, byte(0))
)

func TestMetadataRequest(t *testing.T) {
func TestMetadataRequestV0(t *testing.T) {
    request := new(MetadataRequest)
    testRequest(t, "no topics", request, metadataRequestNoTopics)
    testRequest(t, "no topics", request, metadataRequestNoTopicsV0)

    request.Topics = []string{"topic1"}
    testRequest(t, "one topic", request, metadataRequestOneTopic)
    testRequest(t, "one topic", request, metadataRequestOneTopicV0)

    request.Topics = []string{"foo", "bar", "baz"}
    testRequest(t, "three topics", request, metadataRequestThreeTopics)
    testRequest(t, "three topics", request, metadataRequestThreeTopicsV0)
}

func TestMetadataRequestV1(t *testing.T) {
    request := new(MetadataRequest)
    request.Version = 1
    testRequest(t, "no topics", request, metadataRequestNoTopicsV1)

    request.Topics = []string{"topic1"}
    testRequest(t, "one topic", request, metadataRequestOneTopicV0)

    request.Topics = []string{"foo", "bar", "baz"}
    testRequest(t, "three topics", request, metadataRequestThreeTopicsV0)
}

func TestMetadataRequestV2(t *testing.T) {
    request := new(MetadataRequest)
    request.Version = 2
    testRequest(t, "no topics", request, metadataRequestNoTopicsV1)

    request.Topics = []string{"topic1"}
    testRequest(t, "one topic", request, metadataRequestOneTopicV0)
}

func TestMetadataRequestV3(t *testing.T) {
    request := new(MetadataRequest)
    request.Version = 3
    testRequest(t, "no topics", request, metadataRequestNoTopicsV1)

    request.Topics = []string{"topic1"}
    testRequest(t, "one topic", request, metadataRequestOneTopicV0)
}

func TestMetadataRequestV4(t *testing.T) {
    request := new(MetadataRequest)
    request.Version = 4
    request.Topics = []string{"topic1"}
    request.AllowAutoTopicCreation = true
    testRequest(t, "one topic", request, metadataRequestAutoCreateV4)

    request.AllowAutoTopicCreation = false
    testRequest(t, "one topic", request, metadataRequestNoAutoCreateV4)
}
109 vendor/github.com/Shopify/sarama/metadata_response.go (generated, vendored)
@@ -1,14 +1,15 @@
package sarama

type PartitionMetadata struct {
    Err      KError
    ID       int32
    Leader   int32
    Replicas []int32
    Isr      []int32
    Err             KError
    ID              int32
    Leader          int32
    Replicas        []int32
    Isr             []int32
    OfflineReplicas []int32
}

func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
    tmp, err := pd.getInt16()
    if err != nil {
        return err

@@ -35,10 +36,17 @@ func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
        return err
    }

    if version >= 5 {
        pm.OfflineReplicas, err = pd.getInt32Array()
        if err != nil {
            return err
        }
    }

    return nil
}

func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
    pe.putInt16(int16(pm.Err))
    pe.putInt32(pm.ID)
    pe.putInt32(pm.Leader)

@@ -53,16 +61,24 @@ func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
        return err
    }

    if version >= 5 {
        err = pe.putInt32Array(pm.OfflineReplicas)
        if err != nil {
            return err
        }
    }

    return nil
}

type TopicMetadata struct {
    Err        KError
    Name       string
    IsInternal bool // Only valid for Version >= 1
    Partitions []*PartitionMetadata
}

func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
    tmp, err := pd.getInt16()
    if err != nil {
        return err

@@ -74,6 +90,13 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
        return err
    }

    if version >= 1 {
        tm.IsInternal, err = pd.getBool()
        if err != nil {
            return err
        }
    }

    n, err := pd.getArrayLength()
    if err != nil {
        return err

@@ -81,7 +104,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
    tm.Partitions = make([]*PartitionMetadata, n)
    for i := 0; i < n; i++ {
        tm.Partitions[i] = new(PartitionMetadata)
        err = tm.Partitions[i].decode(pd)
        err = tm.Partitions[i].decode(pd, version)
        if err != nil {
            return err
        }

@@ -90,7 +113,7 @@ func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
    return nil
}

func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
    pe.putInt16(int16(tm.Err))

    err = pe.putString(tm.Name)

@@ -98,13 +121,17 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
        return err
    }

    if version >= 1 {
        pe.putBool(tm.IsInternal)
    }

    err = pe.putArrayLength(len(tm.Partitions))
    if err != nil {
        return err
    }

    for _, pm := range tm.Partitions {
        err = pm.encode(pe)
        err = pm.encode(pe, version)
        if err != nil {
            return err
        }

@@ -114,11 +141,24 @@ func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
}

type MetadataResponse struct {
    Brokers []*Broker
    Topics  []*TopicMetadata
    Version        int16
    ThrottleTimeMs int32
    Brokers        []*Broker
    ClusterID      *string
    ControllerID   int32
    Topics         []*TopicMetadata
}

func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
    r.Version = version

    if version >= 3 {
        r.ThrottleTimeMs, err = pd.getInt32()
        if err != nil {
            return err
        }
    }

    n, err := pd.getArrayLength()
    if err != nil {
        return err

@@ -127,12 +167,28 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
    r.Brokers = make([]*Broker, n)
    for i := 0; i < n; i++ {
        r.Brokers[i] = new(Broker)
        err = r.Brokers[i].decode(pd)
        err = r.Brokers[i].decode(pd, version)
        if err != nil {
            return err
        }
    }

    if version >= 2 {
        r.ClusterID, err = pd.getNullableString()
        if err != nil {
            return err
        }
    }

    if version >= 1 {
        r.ControllerID, err = pd.getInt32()
        if err != nil {
            return err
        }
    } else {
        r.ControllerID = -1
    }

    n, err = pd.getArrayLength()
    if err != nil {
        return err

@@ -141,7 +197,7 @@ func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
    r.Topics = make([]*TopicMetadata, n)
    for i := 0; i < n; i++ {
        r.Topics[i] = new(TopicMetadata)
        err = r.Topics[i].decode(pd)
        err = r.Topics[i].decode(pd, version)
        if err != nil {
            return err
        }

@@ -156,18 +212,22 @@ func (r *MetadataResponse) encode(pe packetEncoder) error {
        return err
    }
    for _, broker := range r.Brokers {
        err = broker.encode(pe)
        err = broker.encode(pe, r.Version)
        if err != nil {
            return err
        }
    }

    if r.Version >= 1 {
        pe.putInt32(r.ControllerID)
    }

    err = pe.putArrayLength(len(r.Topics))
    if err != nil {
        return err
    }
    for _, tm := range r.Topics {
        err = tm.encode(pe)
        err = tm.encode(pe, r.Version)
        if err != nil {
            return err
        }

@@ -181,11 +241,22 @@ func (r *MetadataResponse) key() int16 {
}

func (r *MetadataResponse) version() int16 {
    return 0
    return r.Version
}

func (r *MetadataResponse) requiredVersion() KafkaVersion {
    return minVersion
    switch r.Version {
    case 1:
        return V0_10_0_0
    case 2:
        return V0_10_1_0
    case 3, 4:
        return V0_11_0_0
    case 5:
        return V1_0_0_0
    default:
        return MinVersion
    }
}

// testing API
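Decoding mirrors the same gating: ControllerID arrives at v1 (and is forced to -1 below that), ClusterID at v2, ThrottleTimeMs at v3, and per-partition OfflineReplicas at v5. A hedged sketch of reading those fields off a decoded response; inspectMetadata is a hypothetical helper, not part of sarama:

package main

import (
    "fmt"

    "github.com/Shopify/sarama"
)

// inspectMetadata is a hypothetical helper: it prints only the fields the
// broker could actually have sent for the negotiated version.
func inspectMetadata(resp *sarama.MetadataResponse) {
    if resp.Version >= 1 {
        fmt.Println("controller:", resp.ControllerID) // -1 below v1
    }
    if resp.Version >= 2 && resp.ClusterID != nil {
        fmt.Println("cluster:", *resp.ClusterID)
    }
    if resp.Version >= 3 {
        fmt.Println("throttle ms:", resp.ThrottleTimeMs)
    }
    for _, t := range resp.Topics {
        for _, p := range t.Partitions {
            if resp.Version >= 5 {
                fmt.Printf("%s/%d offline replicas: %v\n", t.Name, p.ID, p.OfflineReplicas)
            }
        }
    }
}

func main() {
    inspectMetadata(&sarama.MetadataResponse{Version: 5})
}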
169 vendor/github.com/Shopify/sarama/metadata_response_test.go (generated, vendored)
@@ -3,11 +3,11 @@ package sarama
import "testing"

var (
    emptyMetadataResponse = []byte{
    emptyMetadataResponseV0 = []byte{
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00}

    brokersNoTopicsMetadataResponse = []byte{
    brokersNoTopicsMetadataResponseV0 = []byte{
        0x00, 0x00, 0x00, 0x02,

        0x00, 0x00, 0xab, 0xff,

@@ -20,7 +20,7 @@ var (
        0x00, 0x00, 0x00, 0x00}

    topicsNoBrokersMetadataResponse = []byte{
    topicsNoBrokersMetadataResponseV0 = []byte{
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x02,

@@ -36,12 +36,76 @@ var (
        0x00, 0x00,
        0x00, 0x03, 'b', 'a', 'r',
        0x00, 0x00, 0x00, 0x00}

    brokersNoTopicsMetadataResponseV1 = []byte{
        0x00, 0x00, 0x00, 0x02,

        0x00, 0x00, 0xab, 0xff,
        0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't',
        0x00, 0x00, 0x00, 0x33,
        0x00, 0x05, 'r', 'a', 'c', 'k', '0',

        0x00, 0x01, 0x02, 0x03,
        0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm',
        0x00, 0x00, 0x01, 0x11,
        0x00, 0x05, 'r', 'a', 'c', 'k', '1',

        0x00, 0x00, 0x00, 0x01,

        0x00, 0x00, 0x00, 0x00}

    topicsNoBrokersMetadataResponseV1 = []byte{
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00, 0x00, 0x04,

        0x00, 0x00, 0x00, 0x02,

        0x00, 0x00,
        0x00, 0x03, 'f', 'o', 'o',
        0x00,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x04,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x00, 0x00, 0x07,
        0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
        0x00, 0x00, 0x00, 0x00,

        0x00, 0x00,
        0x00, 0x03, 'b', 'a', 'r',
        0x01,
        0x00, 0x00, 0x00, 0x00}

    noBrokersNoTopicsWithThrottleTimeAndClusterIDV3 = []byte{
        0x00, 0x00, 0x00, 0x10,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd',
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x00, 0x00, 0x00}

    noBrokersOneTopicWithOfflineReplicasV5 = []byte{
        0x00, 0x00, 0x00, 0x05,
        0x00, 0x00, 0x00, 0x00,
        0x00, 0x09, 'c', 'l', 'u', 's', 't', 'e', 'r', 'I', 'd',
        0x00, 0x00, 0x00, 0x02,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x00,
        0x00, 0x03, 'f', 'o', 'o',
        0x00,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x04,
        0x00, 0x00, 0x00, 0x01,
        0x00, 0x00, 0x00, 0x07,
        0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03,
        0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
        0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
    }
)

func TestEmptyMetadataResponse(t *testing.T) {
func TestEmptyMetadataResponseV0(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0)
    testVersionDecodable(t, "empty, V0", &response, emptyMetadataResponseV0, 0)
    if len(response.Brokers) != 0 {
        t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
    }

@@ -50,10 +114,10 @@ func TestEmptyMetadataResponse(t *testing.T) {
    }
}

func TestMetadataResponseWithBrokers(t *testing.T) {
func TestMetadataResponseWithBrokersV0(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse, 0)
    testVersionDecodable(t, "brokers, no topics, V0", &response, brokersNoTopicsMetadataResponseV0, 0)
    if len(response.Brokers) != 2 {
        t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!")
    }

@@ -76,10 +140,10 @@ func TestMetadataResponseWithBrokers(t *testing.T) {
    }
}

func TestMetadataResponseWithTopics(t *testing.T) {
func TestMetadataResponseWithTopicsV0(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0)
    testVersionDecodable(t, "topics, no brokers, V0", &response, topicsNoBrokersMetadataResponseV0, 0)
    if len(response.Brokers) != 0 {
        t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
    }

@@ -137,3 +201,90 @@ func TestMetadataResponseWithTopics(t *testing.T) {
        t.Error("Decoding produced invalid partition count for topic 1.")
    }
}

func TestMetadataResponseWithBrokersV1(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "topics, V1", &response, brokersNoTopicsMetadataResponseV1, 1)
    if len(response.Brokers) != 2 {
        t.Error("Decoding produced", len(response.Brokers), "brokers where there were 2!")
    }
    if response.Brokers[0].rack == nil || *response.Brokers[0].rack != "rack0" {
        t.Error("Decoding produced invalid broker 0 rack.")
    }
    if response.Brokers[1].rack == nil || *response.Brokers[1].rack != "rack1" {
        t.Error("Decoding produced invalid broker 1 rack.")
    }
    if response.ControllerID != 1 {
        t.Error("Decoding produced", response.ControllerID, "should have been 1!")
    }
    if len(response.Topics) != 0 {
        t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
    }
}

func TestMetadataResponseWithTopicsV1(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "topics, V1", &response, topicsNoBrokersMetadataResponseV1, 1)
    if len(response.Brokers) != 0 {
        t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!")
    }
    if response.ControllerID != 4 {
        t.Error("Decoding produced", response.ControllerID, "should have been 4!")
    }
    if len(response.Topics) != 2 {
        t.Error("Decoding produced", len(response.Topics), "topics where there were 2!")
    }
    if response.Topics[0].IsInternal {
        t.Error("Decoding produced", response.Topics[0], "topic0 should have been false!")
    }
    if !response.Topics[1].IsInternal {
        t.Error("Decoding produced", response.Topics[1], "topic1 should have been true!")
    }
}

func TestMetadataResponseWithThrottleTime(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "no topics, no brokers, throttle time and cluster Id V3", &response, noBrokersNoTopicsWithThrottleTimeAndClusterIDV3, 3)
    if response.ThrottleTimeMs != int32(16) {
        t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 16!")
    }
    if len(response.Brokers) != 0 {
        t.Error("Decoding produced", response.Brokers, "should have been 0!")
    }
    if response.ControllerID != int32(1) {
        t.Error("Decoding produced", response.ControllerID, "should have been 1!")
    }
    if *response.ClusterID != "clusterId" {
        t.Error("Decoding produced", response.ClusterID, "should have been clusterId!")
    }
    if len(response.Topics) != 0 {
        t.Error("Decoding produced", len(response.Topics), "should have been 0!")
    }
}

func TestMetadataResponseWithOfflineReplicasV5(t *testing.T) {
    response := MetadataResponse{}

    testVersionDecodable(t, "no brokers, 1 topic with offline replica V5", &response, noBrokersOneTopicWithOfflineReplicasV5, 5)
    if response.ThrottleTimeMs != int32(5) {
        t.Error("Decoding produced", response.ThrottleTimeMs, "should have been 5!")
    }
    if len(response.Brokers) != 0 {
        t.Error("Decoding produced", response.Brokers, "should have been 0!")
    }
    if response.ControllerID != int32(2) {
        t.Error("Decoding produced", response.ControllerID, "should have been 21!")
    }
    if *response.ClusterID != "clusterId" {
        t.Error("Decoding produced", response.ClusterID, "should have been clusterId!")
    }
    if len(response.Topics) != 1 {
        t.Error("Decoding produced", len(response.Topics), "should have been 1!")
    }
    if len(response.Topics[0].Partitions[0].OfflineReplicas) != 1 {
        t.Error("Decoding produced", len(response.Topics[0].Partitions[0].OfflineReplicas), "should have been 1!")
    }
}
71 vendor/github.com/Shopify/sarama/mockresponses.go (generated, vendored)
@@ -68,9 +68,10 @@ func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {

// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
    leaders map[string]map[int32]int32
    brokers map[string]int32
    t       TestReporter
    controllerID int32
    leaders      map[string]map[int32]int32
    brokers      map[string]int32
    t            TestReporter
}

func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {

@@ -96,9 +97,17 @@ func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMet
    return mmr
}

func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
    mmr.controllerID = brokerID
    return mmr
}

func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
    metadataRequest := reqBody.(*MetadataRequest)
    metadataResponse := &MetadataResponse{}
    metadataResponse := &MetadataResponse{
        Version:      metadataRequest.version(),
        ControllerID: mmr.controllerID,
    }
    for addr, brokerID := range mmr.brokers {
        metadataResponse.AddBroker(addr, brokerID)
    }

@@ -326,6 +335,60 @@ func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
    return res
}

// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
type MockFindCoordinatorResponse struct {
    groupCoordinators map[string]interface{}
    transCoordinators map[string]interface{}
    t                 TestReporter
}

func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
    return &MockFindCoordinatorResponse{
        groupCoordinators: make(map[string]interface{}),
        transCoordinators: make(map[string]interface{}),
        t:                 t,
    }
}

func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
    switch coordinatorType {
    case CoordinatorGroup:
        mr.groupCoordinators[group] = broker
    case CoordinatorTransaction:
        mr.transCoordinators[group] = broker
    }
    return mr
}

func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
    switch coordinatorType {
    case CoordinatorGroup:
        mr.groupCoordinators[group] = kerror
    case CoordinatorTransaction:
        mr.transCoordinators[group] = kerror
    }
    return mr
}

func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
    req := reqBody.(*FindCoordinatorRequest)
    res := &FindCoordinatorResponse{}
    var v interface{}
    switch req.CoordinatorType {
    case CoordinatorGroup:
        v = mr.groupCoordinators[req.CoordinatorKey]
    case CoordinatorTransaction:
        v = mr.transCoordinators[req.CoordinatorKey]
    }
    switch v := v.(type) {
    case *MockBroker:
        res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
    case KError:
        res.Err = v
    }
    return res
}

// MockOffsetCommitResponse is a `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
    errors map[string]map[string]map[int32]KError
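The two new builders slot into MockBroker handler maps alongside the existing ones. A sketch of typical wiring, assuming the exported mock API as vendored here; the topic and group names are placeholders:

package sarama_test

import (
    "testing"

    "github.com/Shopify/sarama"
)

func TestMocksSketch(t *testing.T) {
    broker := sarama.NewMockBroker(t, 1)
    defer broker.Close()

    // Route metadata requests through the builder with the new SetController,
    // and coordinator lookups through the new FindCoordinator builder.
    broker.SetHandlerByMap(map[string]sarama.MockResponse{
        "MetadataRequest": sarama.NewMockMetadataResponse(t).
            SetController(broker.BrokerID()).
            SetBroker(broker.Addr(), broker.BrokerID()).
            SetLeader("my-topic", 0, broker.BrokerID()),
        "FindCoordinatorRequest": sarama.NewMockFindCoordinatorResponse(t).
            SetCoordinator(sarama.CoordinatorGroup, "my-group", broker),
    })
}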
16 vendor/github.com/Shopify/sarama/offset_commit_request.go (generated, vendored)
@@ -1,5 +1,7 @@
package sarama

import "errors"

// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.

@@ -173,7 +175,7 @@ func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
    case 2:
        return V0_9_0_0
    default:
        return minVersion
        return MinVersion
    }
}

@@ -188,3 +190,15 @@ func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset i

    r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}

func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
    partitions := r.blocks[topic]
    if partitions == nil {
        return 0, "", errors.New("No such offset")
    }
    block := partitions[partitionID]
    if block == nil {
        return 0, "", errors.New("No such offset")
    }
    return block.offset, block.metadata, nil
}
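The new Offset accessor is the read-side counterpart of AddBlock. A short sketch of the round trip, with placeholder topic and group names:

package main

import (
    "fmt"
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    req := &sarama.OffsetCommitRequest{ConsumerGroup: "my-group"} // placeholder group
    req.AddBlock("my-topic", 0, 42, sarama.ReceiveTime, "meta")

    offset, meta, err := req.Offset("my-topic", 0)
    if err != nil {
        log.Fatal(err) // "No such offset" for topic/partition pairs never added
    }
    fmt.Println(offset, meta) // 42 meta
}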
2 vendor/github.com/Shopify/sarama/offset_commit_response.go (generated, vendored)
@@ -81,5 +81,5 @@ func (r *OffsetCommitResponse) version() int16 {
}

func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
    return minVersion
    return MinVersion
}
2 vendor/github.com/Shopify/sarama/offset_fetch_request.go (generated, vendored)
@@ -68,7 +68,7 @@ func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
    case 1:
        return V0_8_2_0
    default:
        return minVersion
        return MinVersion
    }
}
2 vendor/github.com/Shopify/sarama/offset_fetch_response.go (generated, vendored)
@@ -115,7 +115,7 @@ func (r *OffsetFetchResponse) version() int16 {
}

func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
    return minVersion
    return MinVersion
}

func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
2 vendor/github.com/Shopify/sarama/offset_request.go (generated, vendored)
@@ -109,7 +109,7 @@ func (r *OffsetRequest) requiredVersion() KafkaVersion {
    case 1:
        return V0_10_1_0
    default:
        return minVersion
        return MinVersion
    }
}
2 vendor/github.com/Shopify/sarama/offset_response.go (generated, vendored)
@@ -155,7 +155,7 @@ func (r *OffsetResponse) requiredVersion() KafkaVersion {
    case 1:
        return V0_10_1_0
    default:
        return minVersion
        return MinVersion
    }
}
8 vendor/github.com/Shopify/sarama/produce_request.go (generated, vendored)
@@ -113,9 +113,9 @@ func (r *ProduceRequest) encode(pe packetEncoder) error {
    }
    if metricRegistry != nil {
        if r.Version >= 3 {
            topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric)
            topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
        } else {
            topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric)
            topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
        }
        batchSize := int64(pe.offset() - startOffset)
        batchSizeMetric.Update(batchSize)

@@ -215,7 +215,7 @@ func (r *ProduceRequest) requiredVersion() KafkaVersion {
    case 3:
        return V0_11_0_0
    default:
        return minVersion
        return MinVersion
    }
}

@@ -231,7 +231,7 @@ func (r *ProduceRequest) ensureRecords(topic string, partition int32) {

func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
    r.ensureRecords(topic, partition)
    set := r.records[topic][partition].msgSet
    set := r.records[topic][partition].MsgSet

    if set == nil {
        set = new(MessageSet)
4 vendor/github.com/Shopify/sarama/produce_request_test.go (generated, vendored)
@@ -99,6 +99,8 @@ func TestProduceRequest(t *testing.T) {
    }
    request.AddBatch("topic", 0xAD, batch)
    packet := testRequestEncode(t, "one record", request, produceRequestOneRecord)
    batch.Records[0].length.startOffset = 0
    // compressRecords field is not populated on decoding because consumers
    // are only interested in decoded records.
    batch.compressedRecords = nil
    testRequestDecode(t, "one record", request, packet)
}
2 vendor/github.com/Shopify/sarama/produce_response.go (generated, vendored)
@@ -152,7 +152,7 @@ func (r *ProduceResponse) requiredVersion() KafkaVersion {
    case 3:
        return V0_11_0_0
    default:
        return minVersion
        return MinVersion
    }
}
41 vendor/github.com/Shopify/sarama/produce_set.go (generated, vendored)
@@ -59,10 +59,11 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
    if set == nil {
        if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
            batch := &RecordBatch{
                FirstTimestamp: timestamp,
                Version:        2,
                ProducerID:     -1, /* No producer id */
                Codec:          ps.parent.conf.Producer.Compression,
                FirstTimestamp:   timestamp,
                Version:          2,
                ProducerID:       -1, /* No producer id */
                Codec:            ps.parent.conf.Producer.Compression,
                CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
            }
            set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
            size = recordBatchOverhead

@@ -79,7 +80,7 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
        rec := &Record{
            Key:            key,
            Value:          val,
            TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp),
            TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
        }
        size += len(key) + len(val)
        if len(msg.Headers) > 0 {

@@ -89,14 +90,14 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
            size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
        }
        }
        set.recordsToSend.recordBatch.addRecord(rec)
        set.recordsToSend.RecordBatch.addRecord(rec)
    } else {
        msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
        if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
            msgToSend.Timestamp = timestamp
            msgToSend.Version = 1
        }
        set.recordsToSend.msgSet.addMessage(msgToSend)
        set.recordsToSend.MsgSet.addMessage(msgToSend)
        size = producerMessageOverhead + len(key) + len(val)
    }

@@ -122,7 +123,14 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
    for topic, partitionSet := range ps.msgs {
        for partition, set := range partitionSet {
            if req.Version >= 3 {
                rb := set.recordsToSend.recordBatch
                // If the API version we're hitting is 3 or greater, we need to calculate
                // offsets for each record in the batch relative to FirstOffset.
                // Additionally, we must set LastOffsetDelta to the value of the last offset
                // in the batch. Since the OffsetDelta of the first record is 0, we know that the
                // final record of any batch will have an offset of (# of records in batch) - 1.
                // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
                // under the RecordBatch section for details.)
                rb := set.recordsToSend.RecordBatch
                if len(rb.Records) > 0 {
                    rb.LastOffsetDelta = int32(len(rb.Records) - 1)
                    for i, record := range rb.Records {

@@ -134,7 +142,7 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
                continue
            }
            if ps.parent.conf.Producer.Compression == CompressionNone {
                req.AddSet(topic, partition, set.recordsToSend.msgSet)
                req.AddSet(topic, partition, set.recordsToSend.MsgSet)
            } else {
                // When compression is enabled, the entire set for each partition is compressed
                // and sent as the payload of a single fake "message" with the appropriate codec

@@ -147,24 +155,25 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
                // recompressing the message set.
                // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
                // for details on relative offsets.)
                for i, msg := range set.recordsToSend.msgSet.Messages {
                for i, msg := range set.recordsToSend.MsgSet.Messages {
                    msg.Offset = int64(i)
                }
                }
                payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
                payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
                if err != nil {
                    Logger.Println(err) // if this happens, it's basically our fault.
                    panic(err)
                }
                compMsg := &Message{
                    Codec: ps.parent.conf.Producer.Compression,
                    Key:   nil,
                    Value: payload,
                    Set:   set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics
                    Codec:            ps.parent.conf.Producer.Compression,
                    CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
                    Key:              nil,
                    Value:            payload,
                    Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
                }
                if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
                    compMsg.Version = 1
                    compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp
                    compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
                }
                req.AddMessage(topic, partition, compMsg)
            }
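The comment block above pins down the v3+ invariant: record i carries OffsetDelta i, so LastOffsetDelta for a batch of n records is n-1. A standalone sketch of that rule over the exported RecordBatch type, under the assumption that the batch is fully assembled before sending:

package main

import "github.com/Shopify/sarama"

// fixupOffsets is a sketch of the invariant described above: each record
// gets its index as OffsetDelta, and the batch records the last delta.
func fixupOffsets(rb *sarama.RecordBatch) {
    if len(rb.Records) == 0 {
        return
    }
    rb.LastOffsetDelta = int32(len(rb.Records) - 1)
    for i, record := range rb.Records {
        record.OffsetDelta = int64(i)
    }
}

func main() {
    batch := &sarama.RecordBatch{Records: []*sarama.Record{{}, {}, {}}}
    fixupOffsets(batch) // LastOffsetDelta == 2, deltas 0, 1, 2
}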
4 vendor/github.com/Shopify/sarama/produce_set_test.go (generated, vendored)
@@ -167,7 +167,7 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
        t.Error("Wrong request version")
    }

    for _, msgBlock := range req.records["t1"][0].msgSet.Messages {
    for _, msgBlock := range req.records["t1"][0].MsgSet.Messages {
        msg := msgBlock.Msg
        err := msg.decodeSet()
        if err != nil {

@@ -227,7 +227,7 @@ func TestProduceSetV3RequestBuilding(t *testing.T) {
        t.Error("Wrong request version")
    }

    batch := req.records["t1"][0].recordBatch
    batch := req.records["t1"][0].RecordBatch
    if batch.FirstTimestamp != now {
        t.Errorf("Wrong first timestamp: %v", batch.FirstTimestamp)
    }
11 vendor/github.com/Shopify/sarama/record_batch.go (generated, vendored)
@@ -40,6 +40,7 @@ type RecordBatch struct {
    PartitionLeaderEpoch int32
    Version              int8
    Codec                CompressionCodec
    CompressionLevel     int
    Control              bool
    LastOffsetDelta      int32
    FirstTimestamp       time.Time

@@ -219,7 +220,15 @@ func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
        b.compressedRecords = raw
    case CompressionGZIP:
        var buf bytes.Buffer
        writer := gzip.NewWriter(&buf)
        var writer *gzip.Writer
        if b.CompressionLevel != CompressionLevelDefault {
            writer, err = gzip.NewWriterLevel(&buf, b.CompressionLevel)
            if err != nil {
                return err
            }
        } else {
            writer = gzip.NewWriter(&buf)
        }
        if _, err := writer.Write(raw); err != nil {
            return err
        }
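The branch above falls back to gzip.NewWriter unless a non-default level was configured, because gzip.NewWriterLevel can fail on an out-of-range level. A standalone sketch of the same pattern using only the standard library (the level sentinel here is gzip.DefaultCompression rather than sarama's own constant):

package main

import (
    "bytes"
    "compress/gzip"
    "log"
)

// compress picks gzip.NewWriterLevel when a non-default level is requested,
// and plain gzip.NewWriter otherwise, mirroring the branch in encodeRecords.
func compress(raw []byte, level int) ([]byte, error) {
    var buf bytes.Buffer
    var writer *gzip.Writer
    var err error
    if level != gzip.DefaultCompression {
        if writer, err = gzip.NewWriterLevel(&buf, level); err != nil {
            return nil, err // invalid level, e.g. outside the supported range
        }
    } else {
        writer = gzip.NewWriter(&buf)
    }
    if _, err = writer.Write(raw); err != nil {
        return nil, err
    }
    if err = writer.Close(); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}

func main() {
    out, err := compress([]byte("hello"), gzip.BestSpeed)
    if err != nil {
        log.Fatal(err)
    }
    log.Println(len(out), "compressed bytes")
}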
14 vendor/github.com/Shopify/sarama/record_test.go (generated, vendored)
@@ -116,11 +116,12 @@ var recordBatchTestCases = []struct {
    {
        name: "gzipped record",
        batch: RecordBatch{
            Version:         2,
            Codec:           CompressionGZIP,
            FirstTimestamp:  time.Unix(1479847795, 0),
            MaxTimestamp:    time.Unix(0, 0),
            LastOffsetDelta: 0,
            Version:          2,
            Codec:            CompressionGZIP,
            CompressionLevel: CompressionLevelDefault,
            FirstTimestamp:   time.Unix(1479847795, 0),
            MaxTimestamp:     time.Unix(0, 0),
            LastOffsetDelta:  0,
            Records: []*Record{{
                TimestampDelta: 5 * time.Millisecond,
                Key:            []byte{1, 2, 3, 4},

@@ -281,6 +282,9 @@ func TestRecordBatchDecoding(t *testing.T) {
        for _, r := range tc.batch.Records {
            r.length = varintLengthField{}
        }
        // The compression level is not restored on decoding. It is not needed
        // anyway. We only set it here to ensure that comparision succeeds.
        batch.CompressionLevel = tc.batch.CompressionLevel
        if !reflect.DeepEqual(batch, tc.batch) {
            t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch))
        }
54 vendor/github.com/Shopify/sarama/records.go (generated, vendored)
@@ -14,30 +14,30 @@ const (
// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
type Records struct {
    recordsType int
    msgSet      *MessageSet
    recordBatch *RecordBatch
    MsgSet      *MessageSet
    RecordBatch *RecordBatch
}

func newLegacyRecords(msgSet *MessageSet) Records {
    return Records{recordsType: legacyRecords, msgSet: msgSet}
    return Records{recordsType: legacyRecords, MsgSet: msgSet}
}

func newDefaultRecords(batch *RecordBatch) Records {
    return Records{recordsType: defaultRecords, recordBatch: batch}
    return Records{recordsType: defaultRecords, RecordBatch: batch}
}

// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil.
// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil.
// The first return value indicates whether both fields are nil (and the type is not set).
// If both fields are not nil, it returns an error.
func (r *Records) setTypeFromFields() (bool, error) {
    if r.msgSet == nil && r.recordBatch == nil {
    if r.MsgSet == nil && r.RecordBatch == nil {
        return true, nil
    }
    if r.msgSet != nil && r.recordBatch != nil {
        return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
    if r.MsgSet != nil && r.RecordBatch != nil {
        return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
    }
    r.recordsType = defaultRecords
    if r.msgSet != nil {
    if r.MsgSet != nil {
        r.recordsType = legacyRecords
    }
    return false, nil

@@ -52,15 +52,15 @@ func (r *Records) encode(pe packetEncoder) error {

    switch r.recordsType {
    case legacyRecords:
        if r.msgSet == nil {
        if r.MsgSet == nil {
            return nil
        }
        return r.msgSet.encode(pe)
        return r.MsgSet.encode(pe)
    case defaultRecords:
        if r.recordBatch == nil {
        if r.RecordBatch == nil {
            return nil
        }
        return r.recordBatch.encode(pe)
        return r.RecordBatch.encode(pe)
    }

    return fmt.Errorf("unknown records type: %v", r.recordsType)

@@ -89,11 +89,11 @@ func (r *Records) decode(pd packetDecoder) error {

    switch r.recordsType {
    case legacyRecords:
        r.msgSet = &MessageSet{}
        return r.msgSet.decode(pd)
        r.MsgSet = &MessageSet{}
        return r.MsgSet.decode(pd)
    case defaultRecords:
        r.recordBatch = &RecordBatch{}
        return r.recordBatch.decode(pd)
        r.RecordBatch = &RecordBatch{}
        return r.RecordBatch.decode(pd)
    }
    return fmt.Errorf("unknown records type: %v", r.recordsType)
}

@@ -107,15 +107,15 @@ func (r *Records) numRecords() (int, error) {

    switch r.recordsType {
    case legacyRecords:
        if r.msgSet == nil {
        if r.MsgSet == nil {
            return 0, nil
        }
        return len(r.msgSet.Messages), nil
        return len(r.MsgSet.Messages), nil
    case defaultRecords:
        if r.recordBatch == nil {
        if r.RecordBatch == nil {
            return 0, nil
        }
        return len(r.recordBatch.Records), nil
        return len(r.RecordBatch.Records), nil
    }
    return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
}

@@ -131,15 +131,15 @@ func (r *Records) isPartial() (bool, error) {
    case unknownRecords:
        return false, nil
    case legacyRecords:
        if r.msgSet == nil {
        if r.MsgSet == nil {
            return false, nil
        }
        return r.msgSet.PartialTrailingMessage, nil
        return r.MsgSet.PartialTrailingMessage, nil
    case defaultRecords:
        if r.recordBatch == nil {
        if r.RecordBatch == nil {
            return false, nil
        }
        return r.recordBatch.PartialTrailingRecord, nil
        return r.RecordBatch.PartialTrailingRecord, nil
    }
    return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}

@@ -155,10 +155,10 @@ func (r *Records) isControl() (bool, error) {
    case legacyRecords:
        return false, nil
    case defaultRecords:
        if r.recordBatch == nil {
        if r.RecordBatch == nil {
            return false, nil
        }
        return r.recordBatch.Control, nil
        return r.RecordBatch.Control, nil
    }
    return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
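With the fields exported, callers outside the package can now construct the union directly; the unexported type tag is derived lazily from whichever side is non-nil, and setting both is an error. A small sketch of the convention:

package main

import (
    "fmt"

    "github.com/Shopify/sarama"
)

func main() {
    // Exactly one side of the union should be set per Records value.
    legacy := sarama.Records{MsgSet: &sarama.MessageSet{}}       // pre-0.11 message sets
    modern := sarama.Records{RecordBatch: &sarama.RecordBatch{}} // 0.11+ record batches
    fmt.Println(legacy.MsgSet != nil, modern.RecordBatch != nil) // true true
}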
8 vendor/github.com/Shopify/sarama/records_test.go (generated, vendored)
@@ -45,8 +45,8 @@ func TestLegacyRecords(t *testing.T) {
    if r.recordsType != legacyRecords {
        t.Fatalf("Wrong records type %v, expected %v", r.recordsType, legacyRecords)
    }
    if !reflect.DeepEqual(set, r.msgSet) {
        t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.msgSet)
    if !reflect.DeepEqual(set, r.MsgSet) {
        t.Errorf("Wrong decoding for legacy records, wanted %#+v, got %#+v", set, r.MsgSet)
    }

    n, err := r.numRecords()

@@ -113,8 +113,8 @@ func TestDefaultRecords(t *testing.T) {
    if r.recordsType != defaultRecords {
        t.Fatalf("Wrong records type %v, expected %v", r.recordsType, defaultRecords)
    }
    if !reflect.DeepEqual(batch, r.recordBatch) {
        t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.recordBatch)
    if !reflect.DeepEqual(batch, r.RecordBatch) {
        t.Errorf("Wrong decoding for default records, wanted %#+v, got %#+v", batch, r.RecordBatch)
    }

    n, err := r.numRecords()
6 vendor/github.com/Shopify/sarama/request.go (generated, vendored)
@@ -97,7 +97,7 @@ func allocateBody(key, version int16) protocolBody {
    case 9:
        return &OffsetFetchRequest{}
    case 10:
        return &ConsumerMetadataRequest{}
        return &FindCoordinatorRequest{}
    case 11:
        return &JoinGroupRequest{}
    case 12:

@@ -118,6 +118,8 @@ func allocateBody(key, version int16) protocolBody {
        return &CreateTopicsRequest{}
    case 20:
        return &DeleteTopicsRequest{}
    case 21:
        return &DeleteRecordsRequest{}
    case 22:
        return &InitProducerIDRequest{}
    case 24:

@@ -140,6 +142,8 @@ func allocateBody(key, version int16) protocolBody {
        return &AlterConfigsRequest{}
    case 37:
        return &CreatePartitionsRequest{}
    case 42:
        return &DeleteGroupsRequest{}
    }
    return nil
}
5 vendor/github.com/Shopify/sarama/request_test.go (generated, vendored)
@@ -50,6 +50,9 @@ func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []
}

func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) {
    if !rb.requiredVersion().IsAtLeast(MinVersion) {
        t.Errorf("Request %s has invalid required version", name)
    }
    packet := testRequestEncode(t, name, rb, expected)
    testRequestDecode(t, name, rb, packet)
}

@@ -76,6 +79,8 @@ func testRequestDecode(t *testing.T, name string, rb protocolBody, packet []byte
        t.Error(spew.Sprintf("Decoded request %q does not match the encoded one\nencoded: %+v\ndecoded: %+v", name, rb, decoded.body))
    } else if n != len(packet) {
        t.Errorf("Decoded request %q bytes: %d does not match the encoded one: %d\n", name, n, len(packet))
    } else if rb.version() != decoded.body.version() {
        t.Errorf("Decoded request %q version: %d does not match the encoded one: %d\n", name, decoded.body.version(), rb.version())
    }
}
54 vendor/github.com/Shopify/sarama/utils.go (generated, vendored)
@@ -139,21 +139,49 @@ func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {

// Effective constants defining the supported kafka versions.
var (
    V0_8_2_0   = newKafkaVersion(0, 8, 2, 0)
    V0_8_2_1   = newKafkaVersion(0, 8, 2, 1)
    V0_8_2_2   = newKafkaVersion(0, 8, 2, 2)
    V0_9_0_0   = newKafkaVersion(0, 9, 0, 0)
    V0_9_0_1   = newKafkaVersion(0, 9, 0, 1)
    V0_10_0_0  = newKafkaVersion(0, 10, 0, 0)
    V0_10_0_1  = newKafkaVersion(0, 10, 0, 1)
    V0_10_1_0  = newKafkaVersion(0, 10, 1, 0)
    V0_10_2_0  = newKafkaVersion(0, 10, 2, 0)
    V0_11_0_0  = newKafkaVersion(0, 11, 0, 0)
    V1_0_0_0   = newKafkaVersion(1, 0, 0, 0)
    minVersion = V0_8_2_0
    V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
    V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
    V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
    V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
    V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
    V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
    V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
    V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
    V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
    V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
    V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
    V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
    V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
    V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
    V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
    V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)

    SupportedVersions = []KafkaVersion{
        V0_8_2_0,
        V0_8_2_1,
        V0_8_2_2,
        V0_9_0_0,
        V0_9_0_1,
        V0_10_0_0,
        V0_10_0_1,
        V0_10_1_0,
        V0_10_1_1,
        V0_10_2_0,
        V0_10_2_1,
        V0_11_0_0,
        V0_11_0_1,
        V0_11_0_2,
        V1_0_0_0,
        V1_1_0_0,
    }
    MinVersion = V0_8_2_0
    MaxVersion = V1_1_0_0
)

func ParseKafkaVersion(s string) (KafkaVersion, error) {
    if len(s) < 5 {
        return MinVersion, fmt.Errorf("invalid version `%s`", s)
    }
    var major, minor, veryMinor, patch uint
    var err error
    if s[0] == '0' {

@@ -162,7 +190,7 @@ func ParseKafkaVersion(s string) (KafkaVersion, error) {
        err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
    }
    if err != nil {
        return minVersion, err
        return MinVersion, err
    }
    return newKafkaVersion(major, minor, veryMinor, patch), nil
}
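The version table is now exported, along with the parser. A short usage sketch: four-component strings are required for 0.x releases, three-component strings for 1.x and later, and malformed input falls back to MinVersion plus an error.

package main

import (
    "fmt"
    "log"

    "github.com/Shopify/sarama"
)

func main() {
    v, err := sarama.ParseKafkaVersion("0.11.0.2")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v.IsAtLeast(sarama.V0_11_0_0)) // true
    fmt.Println(len(sarama.SupportedVersions)) // 16 entries, MinVersion through MaxVersion
}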
2 vendor/github.com/aws/aws-sdk-go/.github/ISSUE_TEMPLATE.md (generated, vendored)
@@ -10,5 +10,5 @@ Please fill out the sections below to help us address your issue.

### Steps to reproduce

If you have have an runnable example, please include it.
If you have an runnable example, please include it.
2 vendor/github.com/aws/aws-sdk-go/.godoc_config (generated, vendored)
@@ -3,7 +3,7 @@
    "Pattern": "/sdk-for-go/api/",
    "StripPrefix": "/sdk-for-go/api",
    "Include": ["/src/github.com/aws/aws-sdk-go/aws", "/src/github.com/aws/aws-sdk-go/service"],
    "Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate"],
    "Exclude": ["/src/cmd", "/src/github.com/aws/aws-sdk-go/awstesting", "/src/github.com/aws/aws-sdk-go/awsmigrate", "/src/github.com/aws/aws-sdk-go/private"],
    "IgnoredSuffixes": ["iface"]
},
"Github": {
5
vendor/github.com/aws/aws-sdk-go/.travis.yml
generated
vendored
5
vendor/github.com/aws/aws-sdk-go/.travis.yml
generated
vendored
|
@ -40,11 +40,8 @@ matrix:
|
|||
- os: osx
|
||||
go: tip
|
||||
|
||||
install:
|
||||
- make get-deps
|
||||
|
||||
script:
|
||||
- make unit-with-race-cover
|
||||
- make ci-test
|
||||
|
||||
branches:
|
||||
only:
|
||||
|
|
309 vendor/github.com/aws/aws-sdk-go/CHANGELOG.md (generated, vendored)
@ -1,3 +1,306 @@
|
|||
Release v1.14.17 (2018-06-29)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/secretsmanager`: Updates service examples
|
||||
* New SDK code snippet examples for the new APIs released for the Resource-based Policy support in Secrets Manager
|
||||
|
||||
Release v1.14.16 (2018-06-28)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/elasticbeanstalk`: Updates service API, documentation, and examples
|
||||
* Elastic Beanstalk adds "Suspended" health status to the EnvironmentHealthStatus enum type and updates document.
|
||||
* `service/lambda`: Updates service API and documentation
|
||||
* Support for SQS as an event source.
|
||||
* `service/storagegateway`: Updates service API, documentation, and examples
|
||||
* AWS Storage Gateway now enables you to use Server Message Block (SMB) protocol to store and access objects in Amazon Simple Storage Service (S3).
|
||||
|
||||
Release v1.14.15 (2018-06-27)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/cloudfront`: Updates service API and documentation
|
||||
* Unpublish delete-service-linked-role API.
|
||||
* `service/codepipeline`: Updates service API
|
||||
* UpdatePipeline may now throw a LimitExceededException when adding or updating Source Actions that use periodic checks for change detection
|
||||
* `service/comprehend`: Updates service API, documentation, and paginators
|
||||
* `service/secretsmanager`: Updates service documentation, paginators, and examples
|
||||
* Documentation updates for secretsmanager
|
||||
|
||||
### SDK Bugs
|
||||
* `aws/csm`: Final API Call Attempt events were not being called [#2008](https://github.com/aws/aws-sdk-go/pull/2008)
|
||||
Release v1.14.14 (2018-06-26)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/inspector`: Updates service API, documentation, and paginators
|
||||
* Introduce four new APIs to view and preview Exclusions. Exclusions show which intended security checks are excluded from an assessment, along with reasons and recommendations to fix. The APIs are CreateExclusionsPreview, GetExclusionsPreview, ListExclusions, and DescribeExclusions.
|
||||
* `service/s3`: Updates service API and documentation
|
||||
* Add AllowQuotedRecordDelimiter to Amazon S3 Select API. Please refer to https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html for usage details.
|
||||
* `service/secretsmanager`: Updates service API, documentation, paginators, and examples
|
||||
* This release adds support for resource-based policies that attach directly to your secrets. These policies provide an additional way to control who can access your secrets and what they can do with them. For more information, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html in the Secrets Manager User Guide.
|
||||
|
||||
Release v1.14.13 (2018-06-22)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/alexaforbusiness`: Updates service API and documentation
|
||||
* `service/appstream`: Updates service API, documentation, paginators, and examples
|
||||
* This API update enables customers to find their VPC private IP address and ENI ID associated with AppStream streaming sessions.
|
||||
* `aws/endpoints`: Updated Regions and Endpoints metadata.
|
||||
|
||||
Release v1.14.12 (2018-06-21)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/clouddirectory`: Adds new service
|
||||
* SDK release to support Flexible Schema initiative being carried out by Amazon Cloud Directory. This feature lets customers using new capabilities like: variant typed attributes, dynamic facets and AWS managed Cloud Directory schemas.
|
||||
|
||||
Release v1.14.11 (2018-06-21)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/macie`: Adds new service
|
||||
* Amazon Macie is a security service that uses machine learning to automatically discover, classify, and protect sensitive data in AWS. With this release, we are launching the following Macie HTTPS API operations: AssociateMemberAccount, AssociateS3Resources, DisassociateMemberAccount, DisassociateS3Resources, ListMemberAccounts, ListS3Resources, and UpdateS3Resources. With these API operations you can issue HTTPS requests directly to the service.
|
||||
* `service/neptune`: Updates service API, documentation, and examples
|
||||
* Deprecates the PubliclyAccessible parameter that is not supported by Amazon Neptune.
|
||||
* `service/ssm`: Updates service API, documentation, and examples
|
||||
* Adds Amazon Linux 2 support to Patch Manager
|
||||
|
||||
Release v1.14.10 (2018-06-20)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/acm-pca`: Updates service API, documentation, paginators, and examples
|
||||
* `service/medialive`: Updates service API, documentation, and paginators
|
||||
* AWS Elemental MediaLive now makes Reserved Outputs and Inputs available through the AWS Management Console and API. You can reserve outputs and inputs with a 12 month commitment in exchange for discounted hourly rates. Pricing is available at https://aws.amazon.com/medialive/pricing/
|
||||
* `service/rds`: Updates service API, documentation, and examples
|
||||
* This release adds a new parameter to specify the retention period for Performance Insights data for RDS instances. You can either choose 7 days (default) or 731 days. For more information, see Amazon RDS Documentation.
|
||||
|
||||
### SDK Enhancements
|
||||
* `service/s3`: Update SelectObjectContent doc example to be on the API not nested type. ([#1991](https://github.com/aws/aws-sdk-go/pull/1991))
|
||||
|
||||
### SDK Bugs
|
||||
* `aws/client`: Fix HTTP debug log EventStream payloads ([#2000](https://github.com/aws/aws-sdk-go/pull/2000))
|
||||
* Fixes the SDK's HTTP client debug logging to not log the HTTP response body for EventStreams. This prevents the SDK from buffering a very large amount of data to be logged at once. The aws.LogDebugWithEventStreamBody should be used to log the event stream events.
|
||||
* Fixes a bug in the SDK's response logger which will buffer the response body's content if LogDebug is enabled but LogDebugWithHTTPBody is not.
|
||||
Release v1.14.9 (2018-06-19)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `aws/endpoints`: Updated Regions and Endpoints metadata.
|
||||
* `service/rekognition`: Updates service documentation and examples
|
||||
* Documentation updates for rekognition
|
||||
|
||||
### SDK Bugs
|
||||
* `private/model/api`: Update client ServiceName to be based on name of service for new services. ([#1997](https://github.com/aws/aws-sdk-go/pull/1997))
|
||||
* Fixes the SDK's `ServiceName` AWS service client package value to be unique based on the service name for new AWS services. Does not change exiting client packages.
|
||||
Release v1.14.8 (2018-06-15)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/mediaconvert`: Updates service API and documentation
|
||||
* This release adds language code support according to the ISO-639-3 standard. Custom 3-character language codes are now supported on input and output for both audio and captions.
|
||||
|
||||
Release v1.14.7 (2018-06-14)
|
||||
===
|
||||
|
||||
### Service Client Updates
|
||||
* `service/apigateway`: Updates service API and documentation
|
||||
* Support for PRIVATE endpoint configuration type
|
||||
* `service/dynamodb`: Updates service API and documentation
|
||||
* Added two new fields SSEType and KMSMasterKeyArn to SSEDescription block in describe-table output.
|
||||
* `service/iotanalytics`: Updates service API and documentation
|
||||
|
||||
Release v1.14.6 (2018-06-13)
===

### Service Client Updates
* `service/servicecatalog`: Updates service API
  * Introduced new length limitations for a few of the product fields.
* `service/ssm`: Updates service API and documentation
  * Added support for a new parameter, CloudWatchOutputConfig, for the SendCommand API. Users can now have Run Command output sent to CloudWatch Logs (see the sketch below).
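A minimal sketch of the new parameter, assuming a placeholder instance ID, document, and log group name:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	// Instance ID and log group name are placeholders.
	_, err := svc.SendCommand(&ssm.SendCommandInput{
		DocumentName: aws.String("AWS-RunShellScript"),
		InstanceIds:  []*string{aws.String("i-0123456789abcdef0")},
		Parameters: map[string][]*string{
			"commands": {aws.String("uptime")},
		},
		// New in v1.14.6: stream the command output to CloudWatch Logs.
		CloudWatchOutputConfig: &ssm.CloudWatchOutputConfig{
			CloudWatchOutputEnabled: aws.Bool(true),
			CloudWatchLogGroupName:  aws.String("/my/run-command-output"),
		},
	})
	if err != nil {
		panic(err)
	}
}
```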
Release v1.14.5 (2018-06-12)
===

### Service Client Updates
* `service/devicefarm`: Updates service API and documentation
  * Adding VPCEndpoint support for Remote access. Allows customers to access their private endpoints/services running in their VPC during remote access.
* `service/ecs`: Updates service API and documentation
  * Introduces daemon scheduling capability to deploy one task per instance on selected instances in a cluster. Adds a "force" flag to the DeleteService API to delete a service without first scaling the number of tasks down to zero (see the sketch below).
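A minimal sketch of the new "force" flag; cluster and service names are placeholders:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	// Force skips the requirement to scale the service down to zero
	// tasks before deletion. Cluster and service names are placeholders.
	_, err := svc.DeleteService(&ecs.DeleteServiceInput{
		Cluster: aws.String("my-cluster"),
		Service: aws.String("my-service"),
		Force:   aws.Bool(true),
	})
	if err != nil {
		panic(err)
	}
}
```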
### SDK Enhancements
* `service/rds/rdsutils`: Clean up the rdsutils package and add a new builder to construct connection strings ([#1985](https://github.com/aws/aws-sdk-go/pull/1985))
  * Rewords documentation to be more useful and provides links to the prior setup needed to support authentication tokens. Introduces a builder that allows for building connection strings (see the sketch below).
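A minimal sketch of generating an IAM authentication token with rdsutils. The endpoint, region, and database user are placeholders, and the connection string builder introduced by #1985 is not shown here:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds/rdsutils"
)

func main() {
	sess := session.Must(session.NewSession())

	// Endpoint, region, and DB user are placeholders. The token is then
	// used as the database password when connecting with IAM auth.
	token, err := rdsutils.BuildAuthToken(
		"mydb.cluster-123456789012.us-east-1.rds.amazonaws.com:3306",
		"us-east-1", "db_user", sess.Config.Credentials)
	if err != nil {
		panic(err)
	}
	fmt.Println("auth token:", token)
}
```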
### SDK Bugs
* `aws/signer/v4`: Fix X-Amz-Content-Sha256 being added to the query string for presign ([#1976](https://github.com/aws/aws-sdk-go/pull/1976))
  * Fixes the bug which would allow the X-Amz-Content-Sha256 header to be promoted to the query string when presigning an S3 request. This bug was also preventing users from setting their own sha256 value for a presigned URL. Presigned requests generated with the custom sha256 would have always failed with an invalid signature (see the presign sketch below).
  * Fixes [#1974](https://github.com/aws/aws-sdk-go/pull/1974)
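A minimal sketch of the presign flow this bug affected; bucket, key, and expiry are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket and key are placeholders.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-key"),
	})

	// After #1976 the sha256 header is no longer promoted into the
	// query string of the generated URL.
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}
```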
Release v1.14.4 (2018-06-11)
===

### Service Client Updates
* `service/clouddirectory`: Updates service API and documentation
  * Amazon Cloud Directory now supports optional attributes on Typed Links, giving users the ability to associate and manage data on Typed Links.
* `service/rds`: Updates service documentation
  * Changed lists of valid EngineVersion values to links to the RDS User Guide.
* `service/storagegateway`: Updates service API and documentation
  * AWS Storage Gateway now enables you to create cached volumes and tapes with AWS KMS support.

Release v1.14.3 (2018-06-08)
===

### Service Client Updates
* `service/mediatailor`: Updates service API

Release v1.14.2 (2018-06-07)
===

### Service Client Updates
* `service/medialive`: Updates service API, documentation, and paginators
  * AWS Elemental MediaLive now makes channel log information available through Amazon CloudWatch Logs. You can set up each MediaLive channel with a logging level; when the channel is run, logs will automatically be published to your account on Amazon CloudWatch Logs.

Release v1.14.1 (2018-06-05)
===

### Service Client Updates
* `service/ce`: Updates service API and documentation
* `service/polly`: Updates service API and documentation
  * Amazon Polly adds new French voice - "Lea" (see the sketch below)
* `service/rds`: Updates service API and documentation
  * This release adds customizable processor features for RDS instances.
* `service/secretsmanager`: Updates service documentation
  * Documentation updates for secretsmanager
* `service/shield`: Updates service API and documentation
  * DDoS Response Team access management for AWS Shield
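A minimal sketch of requesting the new voice; output handling and the sample text are assumptions:

```go
package main

import (
	"io"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/polly"
)

func main() {
	svc := polly.New(session.Must(session.NewSession()))

	out, err := svc.SynthesizeSpeech(&polly.SynthesizeSpeechInput{
		OutputFormat: aws.String("mp3"),
		Text:         aws.String("Bonjour tout le monde"),
		VoiceId:      aws.String("Lea"), // the new French voice
	})
	if err != nil {
		panic(err)
	}
	defer out.AudioStream.Close()
	io.Copy(os.Stdout, out.AudioStream) // write the MP3 bytes to stdout
}
```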
Release v1.14.0 (2018-06-04)
===

### Service Client Updates
* `service/AWSMigrationHub`: Updates service documentation
* `service/appstream`: Updates service API and documentation
  * Amazon AppStream 2.0 adds support for Google Drive for G Suite. With this feature, customers will be able to connect their G Suite accounts with AppStream 2.0 and enable Google Drive access for an AppStream 2.0 stack. Users of the stack can then link their Google Drive using their G Suite login credentials and use their existing files stored in Drive with their AppStream 2.0 applications. File changes will be synced automatically to Google Cloud.
* `service/ec2`: Updates service API and documentation
  * You are now able to use instance storage (up to 3600 GB of NVMe based SSD) on M5 instances, the next generation of EC2's General Purpose instances, in us-east-1, us-west-2, us-east-2, eu-west-1 and ca-central-1. M5 instances offer up to 96 vCPUs, 384 GiB of DDR4 instance memory, 25 Gbps in Network bandwidth and improved EBS and Networking bandwidth on smaller instance sizes, and provide a balance of compute, memory and network resources for many applications.
* `service/eks`: Adds new service
* `service/mediaconvert`: Updates service API and documentation
  * This release adds the support for Common Media Application Format (CMAF) fragmented outputs, RF64 WAV audio output format, and HEV1 or HEVC1 MP4 packaging types when using HEVC in DASH or CMAF outputs.
* `service/sagemaker`: Updates service API, documentation, and paginators
  * Amazon SageMaker has added the ability to run hyperparameter tuning jobs. A hyperparameter tuning job will create and evaluate multiple training jobs while tuning algorithm hyperparameters, to optimize a customer specified objective metric.

### SDK Features
* Add support for EventStream based APIs (S3 SelectObjectContent) ([#1941](https://github.com/aws/aws-sdk-go/pull/1941))
  * Adds support for EventStream asynchronous APIs such as the S3 SelectObjectContent API. This API allows your application to receive multiple events asynchronously from the API response. Your application receives these events from a channel on the API response (see the sketch below).
  * See PR [#1941](https://github.com/aws/aws-sdk-go/pull/1941) for example.
  * Fixes [#1895](https://github.com/aws/aws-sdk-go/issues/1895)
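A minimal sketch of consuming the event channel on the SelectObjectContent response; the bucket, key, and SQL expression are placeholders for a CSV object:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket, key, and query are placeholders.
	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:              aws.String("my-bucket"),
		Key:                 aws.String("data.csv"),
		Expression:          aws.String("SELECT * FROM S3Object s"),
		ExpressionType:      aws.String(s3.ExpressionTypeSql),
		InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		panic(err)
	}
	defer out.EventStream.Close()

	// Events arrive asynchronously on a channel from the response.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		panic(err)
	}
}
```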
Release v1.13.60 (2018-06-01)
===

### Service Client Updates
* `service/ds`: Updates service API and documentation
  * Added ResetUserPassword API. Customers can now reset their users' passwords without providing the old passwords in Simple AD and Microsoft AD.
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/iot`: Updates service API and documentation
  * We are releasing force CancelJob and CancelJobExecution functionalities to customers.
* `service/mediatailor`: Adds new service
* `service/redshift`: Updates service documentation
  * Documentation updates for redshift
* `service/sns`: Updates service API, documentation, and paginators
  * The SNS Subscribe API has been updated with two new optional parameters: Attributes and ReturnSubscriptionArn (see the sketch below). Attributes is a map of subscription attributes which can be one or more of: FilterPolicy, DeliveryPolicy, and RawMessageDelivery. ReturnSubscriptionArn is a boolean parameter that overrides the default behavior of returning "pending confirmation" for subscriptions that require confirmation instead of returning the subscription ARN.

### SDK Bugs
* `private/model/api`: Fix error code constants being generated incorrectly. ([#1958](https://github.com/aws/aws-sdk-go/issues/1958))
  * Fixes the SDK's code generation to not modify the error code text value when generating error code constants. This prevents generating error code values which are invalid and will never be sent by the service. This change does not change the error code constant variable name generated by the SDK, only the value of the error code.
  * Fixes [#1856](https://github.com/aws/aws-sdk-go/issues/1856)
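A minimal sketch of the two new Subscribe parameters; the topic and queue ARNs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sns"
)

func main() {
	svc := sns.New(session.Must(session.NewSession()))

	// Topic and queue ARNs are placeholders.
	out, err := svc.Subscribe(&sns.SubscribeInput{
		TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
		Protocol: aws.String("sqs"),
		Endpoint: aws.String("arn:aws:sqs:us-east-1:123456789012:my-queue"),
		Attributes: map[string]*string{
			"RawMessageDelivery": aws.String("true"),
		},
		// Return the ARN even while the subscription is pending confirmation.
		ReturnSubscriptionArn: aws.Bool(true),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.SubscriptionArn))
}
```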
Release v1.13.59 (2018-05-31)
===

### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.

Release v1.13.58 (2018-05-30)
===

### Service Client Updates
* `service/elasticloadbalancingv2`: Updates service API and documentation
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/neptune`: Adds new service
  * Amazon Neptune is a fast, reliable graph database service that makes it easy to build and run applications that work with highly connected datasets. Neptune supports the popular graph models Property Graph and W3C's Resource Description Framework (RDF), and their respective query languages Apache TinkerPop Gremlin 3.3.2 and SPARQL 1.1.

Release v1.13.57 (2018-05-29)
===

### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/pi`: Adds new service

Release v1.13.56 (2018-05-25)
===

### Service Client Updates
* `service/appstream`: Updates service API and documentation
  * This API update enables customers to control whether users can transfer data between their local devices and their streaming applications through file uploads and downloads, clipboard operations, or printing to local devices.
* `service/config`: Updates service API and documentation
* `service/glue`: Updates service API and documentation
  * AWS Glue now sends a delay notification to Amazon CloudWatch Events when an ETL job runs longer than the specified delay notification threshold.
* `service/iot`: Updates service API
  * We are exposing DELETION_IN_PROGRESS as a new job status with regard to the release of the DeleteJob API.

Release v1.13.55 (2018-05-24)
===

### Service Client Updates
* `service/codebuild`: Updates service API
  * AWS CodeBuild Adds Support for Windows Builds.
* `service/elasticloadbalancingv2`: Updates service documentation
* `service/rds`: Updates service API and documentation
  * This release adds CloudWatch Logs integration capabilities to RDS Aurora MySQL clusters
* `service/secretsmanager`: Updates service documentation
  * Documentation updates for secretsmanager

### SDK Bugs
* `service/cloudwatchlogs`: Fix pagination with cloudwatchlogs ([#1945](https://github.com/aws/aws-sdk-go/pull/1945))
  * Fixes the SDK's behavior with CloudWatchLogs APIs which return duplicate `NextToken` values to signal the end of pagination (see the sketch below).
  * Fixes [#1908](https://github.com/aws/aws-sdk-go/pull/1908)
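A minimal sketch of the pagination helper this fix affects; the log group name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// Log group name is a placeholder. After #1945 the page callback
	// stops when the service repeats the same NextToken, instead of
	// looping forever.
	err := svc.FilterLogEventsPages(&cloudwatchlogs.FilterLogEventsInput{
		LogGroupName: aws.String("/my/log-group"),
	}, func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {
		for _, e := range page.Events {
			fmt.Println(aws.StringValue(e.Message))
		}
		return true // continue to the next page
	})
	if err != nil {
		panic(err)
	}
}
```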
Release v1.13.54 (2018-05-22)
===

### Service Client Updates
* `service/ecs`: Updates service API and documentation
  * Amazon Elastic Container Service (ECS) adds service discovery for services that use host or bridged network mode. ECS can now also register instance IPs for active tasks using bridged and host networking with Route 53, making them available via DNS.
* `service/inspector`: Updates service API
  * We are launching the ability to target all EC2 instances. With this launch, resourceGroupArn is now optional for CreateAssessmentTarget and UpdateAssessmentTarget. If resourceGroupArn is not specified, all EC2 instances in the account in the AWS region are included in the assessment target.

Release v1.13.53 (2018-05-21)
===

### Service Client Updates
* `service/cloudformation`: Updates service API and documentation
  * 1) Filtered Update for StackSet based on Accounts and Regions: This feature will allow flexibility for the customers to roll out updates on a StackSet based on specific Accounts and Regions. 2) Support for customized ExecutionRoleName: This feature will allow customers to attach an ExecutionRoleName to the StackSet, thus ensuring more security and controlling the behavior of any AWS resources in the target accounts.

Release v1.13.52 (2018-05-18)
===

### Service Client Updates
* `service/email`: Updates service documentation
  * Fixed a broken link in the documentation for S3Action.
* `service/iot`: Updates service API and documentation
  * We are releasing DeleteJob and DeleteJobExecution APIs to allow customers to delete resources created using AWS IoT Jobs.

Release v1.13.51 (2018-05-17)
===

### Service Client Updates
* `service/codedeploy`: Updates service documentation
  * Documentation updates for codedeploy
* `service/cognito-idp`: Updates service API and documentation
* `service/ec2`: Updates service API and documentation
  * You are now able to use instance storage (up to 1800 GB of NVMe based SSD) on C5 instances, the next generation of EC2's compute optimized instances, in us-east-1, us-west-2, us-east-2, eu-west-1 and ca-central-1. C5 instances offer up to 72 vCPUs, 144 GiB of DDR4 instance memory, 25 Gbps in Network bandwidth and improved EBS and Networking bandwidth on smaller instance sizes to deliver improved performance for compute-intensive workloads. You can now run bare metal workloads on EC2 with i3.metal instances. As a new instance size belonging to the I3 instance family, i3.metal instances have the same characteristics as other instances in the family, including NVMe SSD-backed instance storage optimized for low latency, very high random I/O performance, and high sequential read throughput. I3.metal instances are powered by 2.3 GHz Intel Xeon processors, offering 36 hyper-threaded cores (72 logical processors), 512 GiB of memory, and 15.2 TB of NVMe SSD-backed instance storage. These instances deliver high networking throughput and lower latency with up to 25 Gbps of aggregate network bandwidth using Elastic Network Adapter (ENA)-based Enhanced Networking.

Release v1.13.50 (2018-05-16)
===

@@ -341,7 +644,7 @@ Release v1.13.19 (2018-03-22)

### SDK Bugs
* `aws/endpoints`: Use service metadata for fallback signing name ([#1854](https://github.com/aws/aws-sdk-go/pull/1854))
  * Updates the SDK's endpoint resolution to fallback deriving the service's signing name from the service's modeled metadata in addition the the endpoints modeled data.
  * Updates the SDK's endpoint resolution to fallback deriving the service's signing name from the service's modeled metadata in addition the endpoints modeled data.
  * Fixes [#1850](https://github.com/aws/aws-sdk-go/issues/1850)

Release v1.13.18 (2018-03-21)
===

@@ -1078,7 +1381,7 @@ Release v1.12.31 (2017-11-20)
  * DescribeGroups API and miscellaneous enhancements

### SDK Bugs
* `aws/client`: Retry delays for throttled exception were not limited to 5 mintues [#1654](https://github.com/aws/aws-sdk-go/pull/1654)
* `aws/client`: Retry delays for throttled exception were not limited to 5 minutes [#1654](https://github.com/aws/aws-sdk-go/pull/1654)
  * Fixes [#1653](https://github.com/aws/aws-sdk-go/issues/1653)

Release v1.12.30 (2017-11-17)
===

@@ -1210,7 +1513,7 @@ Release v1.12.21 (2017-11-02)
  * `aws/endpoints`: Updated Regions and Endpoints metadata.

### SDK Bugs
* `aws/request`: Fix bug in request presign creating invalide URL ([#1624](https://github.com/aws/aws-sdk-go/pull/1624))
* `aws/request`: Fix bug in request presign creating invalid URL ([#1624](https://github.com/aws/aws-sdk-go/pull/1624))
  * Fixes a bug in the Request Presign and PresignRequest methods that would allow an invalid expire duration as input. An expire time of 0 would be interpreted by the SDK to generate a normal request signature, not a presigned URL. This caused the returned URL to be unusable.
  * Fixes [#1617](https://github.com/aws/aws-sdk-go/issues/1617)

Release v1.12.20 (2017-11-01)
2 vendor/github.com/aws/aws-sdk-go/CONTRIBUTING.md generated vendored

@@ -89,7 +89,7 @@ go test -tags codegen ./private/...
See the `Makefile` for additional testing tags that can be used in testing.

To test on multiple platforms the SDK includes several DockerFiles under the
`awstesting/sandbox` folder, and associated make recipes to to execute
`awstesting/sandbox` folder, and associated make recipes to execute
unit testing within environments configured for specific Go versions.

```
26 vendor/github.com/aws/aws-sdk-go/Makefile generated vendored

@@ -13,6 +13,7 @@ SDK_ONLY_PKGS=$(shell go list ./... | grep -v "/vendor/")
SDK_UNIT_TEST_ONLY_PKGS=$(shell go list -tags ${UNIT_TEST_TAGS} ./... | grep -v "/vendor/")
SDK_GO_1_4=$(shell go version | grep "go1.4")
SDK_GO_1_5=$(shell go version | grep "go1.5")
SDK_GO_1_6=$(shell go version | grep "go1.6")
SDK_GO_VERSION=$(shell go version | awk '{print $$3}' | tr -d '\n')

all: get-deps generate unit

@@ -35,7 +36,7 @@ help:
	@echo " get-deps-tests to get the SDK's test dependencies"
	@echo " get-deps-verify to get the SDK's verification dependencies"

generate: gen-test gen-endpoints gen-services
generate: cleanup-models gen-test gen-endpoints gen-services

gen-test: gen-protocol-test

@@ -48,6 +49,10 @@ gen-protocol-test:
gen-endpoints:
	go generate ./models/endpoints/

cleanup-models:
	@echo "Cleaning up stale model versions"
	@./cleanup_models.sh

build:
	@echo "go build SDK and vendor packages"
	@go build ${SDK_ONLY_PKGS}

@@ -60,6 +65,18 @@ unit-with-race-cover: get-deps-tests build verify
	@echo "go test SDK and vendor packages"
	@go test -tags ${UNIT_TEST_TAGS} -race -cpu=1,2,4 $(SDK_UNIT_TEST_ONLY_PKGS)

ci-test: ci-test-generate unit-with-race-cover ci-test-generate-validate

ci-test-generate: get-deps
	@echo "CI test generated code"
	@if [ \( -z "${SDK_GO_1_6}" \) -a \( -z "${SDK_GO_1_5}" \) ]; then make generate; else echo "skipping generate"; fi

ci-test-generate-validate:
	@echo "CI test validate no generated code changes"
	@gitstatus=`if [ \( -z "${SDK_GO_1_6}" \) -a \( -z "${SDK_GO_1_5}" \) ]; then git status --porcelain; else echo "skipping validation"; fi`; \
	echo "$$gitstatus"; \
	if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then git diff; exit 1; fi

integration: get-deps-tests integ-custom smoke-tests performance

integ-custom:

@@ -118,6 +135,13 @@ sandbox-go19: sandbox-build-go19
sandbox-test-go19: sandbox-build-go19
	docker run -t aws-sdk-go-1.9

sandbox-build-go110:
	docker build -f ./awstesting/sandbox/Dockerfile.test.go1.8 -t "aws-sdk-go-1.10" .
sandbox-go110: sandbox-build-go110
	docker run -i -t aws-sdk-go-1.10 bash
sandbox-test-go110: sandbox-build-go110
	docker run -t aws-sdk-go-1.10

sandbox-build-gotip:
	@echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container"
	docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" .
2 vendor/github.com/aws/aws-sdk-go/README.md generated vendored

@@ -334,7 +334,7 @@ take a callback function that will be called for each page of the API's response
```

Waiter helper methods provide the functionality to wait for an AWS resource
state. These methods abstract the logic needed to to check the state of an
state. These methods abstract the logic needed to check the state of an
AWS resource, and wait until that resource is in a desired state. The waiter
will block until the resource is in the state that is desired, an error occurs,
or the waiter times out. If a resource times out the error code returned will
4 vendor/github.com/aws/aws-sdk-go/aws/client/client.go generated vendored

@@ -91,6 +91,6 @@ func (c *Client) AddDebugHandlers() {
		return
	}

	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
102 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go generated vendored

@@ -44,12 +44,22 @@ func (reader *teeReaderCloser) Close() error {
	return reader.Source.Close()
}

// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
// to a service. Will include the HTTP request body if the LogLevel of the
// request matches LogDebugWithHTTPBody.
var LogHTTPRequestHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequest",
	Fn:   logRequest,
}

func logRequest(r *request.Request) {
	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	bodySeekable := aws.IsReaderSeekable(r.Body)
	dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)

	b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

@@ -63,7 +73,28 @@ func logRequest(r *request.Request) {
		r.ResetBody()
	}

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}

// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
// to a service. Will only log the HTTP request's headers. The request payload
// will not be read.
var LogHTTPRequestHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequestHeader",
	Fn:   logRequestHeader,
}

func logRequestHeader(r *request.Request) {
	b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}

const logRespMsg = `DEBUG: Response %s/%s Details:

@@ -76,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
%s
-----------------------------------------------------`

// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
var LogHTTPResponseHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponse",
	Fn:   logResponse,
}

func logResponse(r *request.Request) {
	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
	r.HTTPResponse.Body = &teeReaderCloser{
		Reader: io.TeeReader(r.HTTPResponse.Body, lw),
		Source: r.HTTPResponse.Body,

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	if logBody {
		r.HTTPResponse.Body = &teeReaderCloser{
			Reader: io.TeeReader(r.HTTPResponse.Body, lw),
			Source: r.HTTPResponse.Body,
		}
	}

	handlerFn := func(req *request.Request) {
		body, err := httputil.DumpResponse(req.HTTPResponse, false)
		b, err := httputil.DumpResponse(req.HTTPResponse, false)
		if err != nil {
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
				req.ClientInfo.ServiceName, req.Operation.Name, err))
			return
		}

		b, err := ioutil.ReadAll(lw.buf)
		if err != nil {
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
			return
		}
		lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
		if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
		lw.Logger.Log(fmt.Sprintf(logRespMsg,
			req.ClientInfo.ServiceName, req.Operation.Name, string(b)))

		if logBody {
			b, err := ioutil.ReadAll(lw.buf)
			if err != nil {
				lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
					req.ClientInfo.ServiceName, req.Operation.Name, err))
				return
			}

			lw.Logger.Log(string(b))
		}
	}

@@ -110,3 +158,27 @@ func logResponse(r *request.Request) {
		Name: handlerName, Fn: handlerFn,
	})
}

// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponseHeader",
	Fn:   logResponseHeader,
}

func logResponseHeader(r *request.Request) {
	if r.Config.Logger == nil {
		return
	}

	b, err := httputil.DumpResponse(r.HTTPResponse, false)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

	r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
78 vendor/github.com/aws/aws-sdk-go/aws/client/logger_test.go generated vendored

@@ -5,6 +5,7 @@ import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"testing"

@@ -127,6 +128,83 @@ func TestLogRequest(t *testing.T) {
	}
}

func TestLogResponse(t *testing.T) {
	cases := []struct {
		Body       *bytes.Buffer
		ExpectBody []byte
		ReadBody   bool
		LogLevel   aws.LogLevelType
	}{
		{
			Body:       bytes.NewBuffer([]byte("body content")),
			ExpectBody: []byte("body content"),
		},
		{
			Body:       bytes.NewBuffer([]byte("body content")),
			LogLevel:   aws.LogDebug,
			ExpectBody: []byte("body content"),
		},
		{
			Body:       bytes.NewBuffer([]byte("body content")),
			LogLevel:   aws.LogDebugWithHTTPBody,
			ReadBody:   true,
			ExpectBody: []byte("body content"),
		},
	}

	for i, c := range cases {
		var logW bytes.Buffer
		req := request.New(
			aws.Config{
				Credentials: credentials.AnonymousCredentials,
				Logger:      &bufLogger{w: &logW},
				LogLevel:    aws.LogLevel(c.LogLevel),
			},
			metadata.ClientInfo{
				Endpoint: "https://mock-service.mock-region.amazonaws.com",
			},
			testHandlers(),
			nil,
			&request.Operation{
				Name:       "APIName",
				HTTPMethod: "POST",
				HTTPPath:   "/",
			},
			struct{}{}, nil,
		)
		req.HTTPResponse = &http.Response{
			StatusCode: 200,
			Status:     "OK",
			Header: http.Header{
				"ABC": []string{"123"},
			},
			Body: ioutil.NopCloser(c.Body),
		}

		logResponse(req)
		req.Handlers.Unmarshal.Run(req)

		if c.ReadBody {
			if e, a := len(c.ExpectBody), c.Body.Len(); e != a {
				t.Errorf("%d, expect original body not to have been read", i)
			}
		}

		if logW.Len() == 0 {
			t.Errorf("%d, expect HTTP Response headers to be logged", i)
		}

		b, err := ioutil.ReadAll(req.HTTPResponse.Body)
		if err != nil {
			t.Fatalf("%d, expect to read SDK request Body", i)
		}

		if e, a := c.ExpectBody, b; !bytes.Equal(e, a) {
			t.Errorf("%d, expect %v body, got %v", i, e, a)
		}
	}
}

type bufLogger struct {
	w *bytes.Buffer
}
1 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go generated vendored

@@ -3,6 +3,7 @@ package metadata
// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
	ServiceName string
	ServiceID   string
	APIVersion  string
	Endpoint    string
	SigningName string
18 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go generated vendored

@@ -178,7 +178,8 @@ func (e *Expiry) IsExpired() bool {
type Credentials struct {
	creds        Value
	forceRefresh bool
	m            sync.Mutex

	m sync.RWMutex

	provider Provider
}

@@ -201,6 +202,17 @@ func NewCredentials(provider Provider) *Credentials {
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
	// Check the cached credentials first with just the read lock.
	c.m.RLock()
	if !c.isExpired() {
		creds := c.creds
		c.m.RUnlock()
		return creds, nil
	}
	c.m.RUnlock()

	// Credentials are expired need to retrieve the credentials taking the full
	// lock.
	c.m.Lock()
	defer c.m.Unlock()

@@ -234,8 +246,8 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
	c.m.Lock()
	defer c.m.Unlock()
	c.m.RLock()
	defer c.m.RUnlock()

	return c.isExpired()
}
90 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials_bench_test.go generated vendored Normal file

@@ -0,0 +1,90 @@
// +build go1.9

package credentials

import (
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"
)

func BenchmarkCredentials_Get(b *testing.B) {
	stub := &stubProvider{}

	cases := []int{1, 10, 100, 500, 1000, 10000}

	for _, c := range cases {
		b.Run(strconv.Itoa(c), func(b *testing.B) {
			creds := NewCredentials(stub)
			var wg sync.WaitGroup
			wg.Add(c)
			for i := 0; i < c; i++ {
				go func() {
					for j := 0; j < b.N; j++ {
						v, err := creds.Get()
						if err != nil {
							b.Fatalf("expect no error %v, %v", v, err)
						}
					}
					wg.Done()
				}()
			}
			b.ResetTimer()

			wg.Wait()
		})
	}
}

func BenchmarkCredentials_Get_Expire(b *testing.B) {
	p := &blockProvider{}

	expRates := []int{10000, 1000, 100}
	cases := []int{1, 10, 100, 500, 1000, 10000}

	for _, expRate := range expRates {
		for _, c := range cases {
			b.Run(fmt.Sprintf("%d-%d", expRate, c), func(b *testing.B) {
				creds := NewCredentials(p)
				var wg sync.WaitGroup
				wg.Add(c)
				for i := 0; i < c; i++ {
					go func(id int) {
						for j := 0; j < b.N; j++ {
							v, err := creds.Get()
							if err != nil {
								b.Fatalf("expect no error %v, %v", v, err)
							}
							// periodically expire creds to cause rwlock
							if id == 0 && j%expRate == 0 {
								creds.Expire()
							}
						}
						wg.Done()
					}(i)
				}
				b.ResetTimer()

				wg.Wait()
			})
		}
	}
}

type blockProvider struct {
	creds   Value
	expired bool
	err     error
}

func (s *blockProvider) Retrieve() (Value, error) {
	s.expired = false
	s.creds.ProviderName = "blockProvider"
	time.Sleep(time.Millisecond)
	return s.creds, s.err
}
func (s *blockProvider) IsExpired() bool {
	return s.expired
}
46 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go generated vendored Normal file

@@ -0,0 +1,46 @@
// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
// via UDP connection. Using the Start function will enable the reporting of
// metrics on a given port. If Start is called, with different parameters, again,
// a panic will occur.
//
// Pause can be called to pause any metrics publishing on a given port. Sessions
// that have had their handlers modified via InjectHandlers may still be used.
// However, the handlers will act as a no-op meaning no metrics will be published.
//
// Example:
//	r, err := csm.Start("clientID", ":31000")
//	if err != nil {
//		panic(fmt.Errorf("failed starting CSM: %v", err))
//	}
//
//	sess, err := session.NewSession(&aws.Config{})
//	if err != nil {
//		panic(fmt.Errorf("failed loading session: %v", err))
//	}
//
//	r.InjectHandlers(&sess.Handlers)
//
//	client := s3.New(sess)
//	resp, err := client.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("bucket"),
//		Key: aws.String("key"),
//	})
//
//	// Will pause monitoring
//	r.Pause()
//	resp, err = client.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("bucket"),
//		Key: aws.String("key"),
//	})
//
//	// Resume monitoring
//	r.Continue()
//
// Start returns a Reporter that is used to enable or disable monitoring. If
// access to the Reporter is required later, calling Get will return the Reporter
// singleton.
//
// Example:
//	r := csm.Get()
//	r.Continue()
package csm
67 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go generated vendored Normal file

@@ -0,0 +1,67 @@
package csm

import (
	"fmt"
	"sync"
)

var (
	lock sync.Mutex
)

// Client side metric handler names
const (
	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
)

// Start will start a long running go routine to capture
// client side metrics. Calling Start multiple times will only
// start the metric listener once and will panic if a different
// client ID or port is passed in.
//
// Example:
//	r, err := csm.Start("clientID", "127.0.0.1:8094")
//	if err != nil {
//		panic(fmt.Errorf("expected no error, but received %v", err))
//	}
//	sess := session.NewSession()
//	r.InjectHandlers(sess.Handlers)
//
//	svc := s3.New(sess)
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("bucket"),
//		Key: aws.String("key"),
//	})
func Start(clientID string, url string) (*Reporter, error) {
	lock.Lock()
	defer lock.Unlock()

	if sender == nil {
		sender = newReporter(clientID, url)
	} else {
		if sender.clientID != clientID {
			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
		}

		if sender.url != url {
			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
		}
	}

	if err := connect(url); err != nil {
		sender = nil
		return nil, err
	}

	return sender, nil
}

// Get will return a reporter if one exists, if one does not exist, nil will
// be returned.
func Get() *Reporter {
	lock.Lock()
	defer lock.Unlock()

	return sender
}
74 vendor/github.com/aws/aws-sdk-go/aws/csm/enable_test.go generated vendored Normal file

@@ -0,0 +1,74 @@
package csm

import (
	"encoding/json"
	"fmt"
	"net"
	"testing"
)

func startUDPServer(done chan struct{}, fn func([]byte)) (string, error) {
	addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		return "", err
	}

	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return "", err
	}

	buf := make([]byte, 1024)
	go func() {
		defer conn.Close()

		for {
			select {
			case <-done:
				return
			default:
			}

			n, _, err := conn.ReadFromUDP(buf)
			fn(buf[:n])

			if err != nil {
				panic(err)
			}
		}
	}()

	return conn.LocalAddr().String(), nil
}

func TestDifferentParams(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("expected panic with different parameters")
		}
	}()
	Start("clientID2", ":0")
}

var MetricsCh = make(chan map[string]interface{}, 1)
var Done = make(chan struct{})

func init() {
	url, err := startUDPServer(Done, func(b []byte) {
		m := map[string]interface{}{}
		if err := json.Unmarshal(b, &m); err != nil {
			panic(fmt.Sprintf("expected no error, but received %v", err))
		}

		MetricsCh <- m
	})

	if err != nil {
		panic(err)
	}

	_, err = Start("clientID", url)
	if err != nil {
		panic(err)
	}
}
40 vendor/github.com/aws/aws-sdk-go/aws/csm/example_test.go generated vendored Normal file

@@ -0,0 +1,40 @@
package csm_test

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func ExampleStart() {
	r, err := csm.Start("clientID", ":31000")
	if err != nil {
		panic(fmt.Errorf("failed starting CSM: %v", err))
	}

	sess, err := session.NewSession(&aws.Config{})
	if err != nil {
		panic(fmt.Errorf("failed loading session: %v", err))
	}

	r.InjectHandlers(&sess.Handlers)

	client := s3.New(sess)
	client.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	// Pauses monitoring
	r.Pause()
	client.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	})

	// Resume monitoring
	r.Continue()
}
51 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go generated vendored Normal file

@@ -0,0 +1,51 @@
package csm

import (
	"strconv"
	"time"
)

type metricTime time.Time

func (t metricTime) MarshalJSON() ([]byte, error) {
	ns := time.Duration(time.Time(t).UnixNano())
	return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
}

type metric struct {
	ClientID  *string     `json:"ClientId,omitempty"`
	API       *string     `json:"Api,omitempty"`
	Service   *string     `json:"Service,omitempty"`
	Timestamp *metricTime `json:"Timestamp,omitempty"`
	Type      *string     `json:"Type,omitempty"`
	Version   *int        `json:"Version,omitempty"`

	AttemptCount *int `json:"AttemptCount,omitempty"`
	Latency      *int `json:"Latency,omitempty"`

	Fqdn           *string `json:"Fqdn,omitempty"`
	UserAgent      *string `json:"UserAgent,omitempty"`
	AttemptLatency *int    `json:"AttemptLatency,omitempty"`

	SessionToken   *string `json:"SessionToken,omitempty"`
	Region         *string `json:"Region,omitempty"`
	AccessKey      *string `json:"AccessKey,omitempty"`
	HTTPStatusCode *int    `json:"HttpStatusCode,omitempty"`
	XAmzID2        *string `json:"XAmzId2,omitempty"`
	XAmzRequestID  *string `json:"XAmznRequestId,omitempty"`

	AWSException        *string `json:"AwsException,omitempty"`
	AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
	SDKException        *string `json:"SdkException,omitempty"`
	SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`

	DestinationIP    *string `json:"DestinationIp,omitempty"`
	ConnectionReused *int    `json:"ConnectionReused,omitempty"`

	AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
	ConnectLatency           *int `json:"ConnectLatency,omitempty"`
	RequestLatency           *int `json:"RequestLatency,omitempty"`
	DNSLatency               *int `json:"DnsLatency,omitempty"`
	TCPLatency               *int `json:"TcpLatency,omitempty"`
	SSLLatency               *int `json:"SslLatency,omitempty"`
}
54 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go generated vendored Normal file

@@ -0,0 +1,54 @@
package csm

import (
	"sync/atomic"
)

const (
	runningEnum = iota
	pausedEnum
)

var (
	// MetricsChannelSize of metrics to hold in the channel
	MetricsChannelSize = 100
)

type metricChan struct {
	ch     chan metric
	paused int64
}

func newMetricChan(size int) metricChan {
	return metricChan{
		ch: make(chan metric, size),
	}
}

func (ch *metricChan) Pause() {
	atomic.StoreInt64(&ch.paused, pausedEnum)
}

func (ch *metricChan) Continue() {
	atomic.StoreInt64(&ch.paused, runningEnum)
}

func (ch *metricChan) IsPaused() bool {
	v := atomic.LoadInt64(&ch.paused)
	return v == pausedEnum
}

// Push will push metrics to the metric channel if the channel
// is not paused
func (ch *metricChan) Push(m metric) bool {
	if ch.IsPaused() {
		return false
	}

	select {
	case ch.ch <- m:
		return true
	default:
		return false
	}
}
72 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan_test.go generated vendored Normal file

@@ -0,0 +1,72 @@
package csm

import (
	"testing"
)

func TestMetricChanPush(t *testing.T) {
	ch := newMetricChan(5)
	defer close(ch.ch)

	pushed := ch.Push(metric{})
	if !pushed {
		t.Errorf("expected metrics to be pushed")
	}

	if e, a := 1, len(ch.ch); e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}
}

func TestMetricChanPauseContinue(t *testing.T) {
	ch := newMetricChan(5)
	defer close(ch.ch)
	ch.Pause()

	if !ch.IsPaused() {
		t.Errorf("expected to be paused, but did not pause properly")
	}

	ch.Continue()
	if ch.IsPaused() {
		t.Errorf("expected to be not paused, but did not continue properly")
	}

	pushed := ch.Push(metric{})
	if !pushed {
		t.Errorf("expected metrics to be pushed")
	}

	if e, a := 1, len(ch.ch); e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}
}

func TestMetricChanPushWhenPaused(t *testing.T) {
	ch := newMetricChan(5)
	defer close(ch.ch)
	ch.Pause()

	pushed := ch.Push(metric{})
	if pushed {
		t.Errorf("expected metrics to not be pushed")
	}

	if e, a := 0, len(ch.ch); e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}
}

func TestMetricChanNonBlocking(t *testing.T) {
	ch := newMetricChan(0)
	defer close(ch.ch)

	pushed := ch.Push(metric{})
	if pushed {
		t.Errorf("expected metrics to be not pushed")
	}

	if e, a := 0, len(ch.ch); e != a {
		t.Errorf("expected %d, but received %d", e, a)
	}
}
232 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go generated vendored Normal file

@@ -0,0 +1,232 @@
package csm

import (
	"encoding/json"
	"net"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

const (
	// DefaultPort is used when no port is specified
	DefaultPort = "31000"
)

// Reporter will gather metrics of API requests made and
// send those metrics to the CSM endpoint.
type Reporter struct {
	clientID  string
	url       string
	conn      net.Conn
	metricsCh metricChan
	done      chan struct{}
}

var (
	sender *Reporter
)

func connect(url string) error {
	const network = "udp"
	if err := sender.connect(network, url); err != nil {
		return err
	}

	if sender.done == nil {
		sender.done = make(chan struct{})
		go sender.start()
	}

	return nil
}

func newReporter(clientID, url string) *Reporter {
	return &Reporter{
		clientID:  clientID,
		url:       url,
		metricsCh: newMetricChan(MetricsChannelSize),
	}
}

func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
	if rep == nil {
		return
	}

	now := time.Now()
	creds, _ := r.Config.Credentials.Get()

	m := metric{
		ClientID:  aws.String(rep.clientID),
		API:       aws.String(r.Operation.Name),
		Service:   aws.String(r.ClientInfo.ServiceID),
		Timestamp: (*metricTime)(&now),
		UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
		Region:    r.Config.Region,
		Type:      aws.String("ApiCallAttempt"),
		Version:   aws.Int(1),

		XAmzRequestID: aws.String(r.RequestID),

		AttemptCount:   aws.Int(r.RetryCount + 1),
		AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
		AccessKey:      aws.String(creds.AccessKeyID),
	}

	if r.HTTPResponse != nil {
		m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
	}

	if r.Error != nil {
		if awserr, ok := r.Error.(awserr.Error); ok {
			setError(&m, awserr)
		}
	}

	rep.metricsCh.Push(m)
}

func setError(m *metric, err awserr.Error) {
	msg := err.Message()
	code := err.Code()

	switch code {
	case "RequestError",
		"SerializationError",
		request.CanceledErrorCode:

		m.SDKException = &code
		m.SDKExceptionMessage = &msg
	default:
		m.AWSException = &code
		m.AWSExceptionMessage = &msg
	}
}

func (rep *Reporter) sendAPICallMetric(r *request.Request) {
	if rep == nil {
		return
	}

	now := time.Now()
	m := metric{
		ClientID:      aws.String(rep.clientID),
		API:           aws.String(r.Operation.Name),
		Service:       aws.String(r.ClientInfo.ServiceID),
		Timestamp:     (*metricTime)(&now),
		Type:          aws.String("ApiCall"),
		AttemptCount:  aws.Int(r.RetryCount + 1),
		Latency:       aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
		XAmzRequestID: aws.String(r.RequestID),
	}

	// TODO: Probably want to figure something out for logging dropped
	// metrics
	rep.metricsCh.Push(m)
}

func (rep *Reporter) connect(network, url string) error {
	if rep.conn != nil {
		rep.conn.Close()
	}

	conn, err := net.Dial(network, url)
	if err != nil {
		return awserr.New("UDPError", "Could not connect", err)
	}

	rep.conn = conn

	return nil
}

func (rep *Reporter) close() {
	if rep.done != nil {
		close(rep.done)
	}

	rep.metricsCh.Pause()
}

func (rep *Reporter) start() {
	defer func() {
		rep.metricsCh.Pause()
	}()

	for {
		select {
		case <-rep.done:
			rep.done = nil
			return
		case m := <-rep.metricsCh.ch:
			// TODO: What to do with this error? Probably should just log
			b, err := json.Marshal(m)
			if err != nil {
				continue
			}

			rep.conn.Write(b)
		}
	}
}

// Pause will pause the metric channel preventing any new metrics from
// being added.
func (rep *Reporter) Pause() {
	lock.Lock()
	defer lock.Unlock()

	if rep == nil {
		return
	}

	rep.close()
}

// Continue will reopen the metric channel and allow for monitoring
// to be resumed.
func (rep *Reporter) Continue() {
	lock.Lock()
	defer lock.Unlock()
	if rep == nil {
		return
	}

	if !rep.metricsCh.IsPaused() {
		return
	}

	rep.metricsCh.Continue()
}

// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent.
//
// Example:
//	// Start must be called in order to inject the correct handlers
//	r, err := csm.Start("clientID", "127.0.0.1:8094")
//	if err != nil {
//		panic(fmt.Errorf("expected no error, but received %v", err))
//	}
//
//	sess := session.NewSession()
//	r.InjectHandlers(&sess.Handlers)
//
//	// create a new service client with our client side metric session
//	svc := s3.New(sess)
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
	if rep == nil {
		return
	}

	apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
	apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}

	handlers.Complete.PushFrontNamed(apiCallHandler)
	handlers.Complete.PushFrontNamed(apiCallAttemptHandler)

	handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
}
249
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter_test.go
generated
vendored
Normal file
249
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter_test.go
generated
vendored
Normal file
|
@ -0,0 +1,249 @@
|
|||
package csm_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/csm"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/aws/signer/v4"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
|
||||
)
|
||||
|
||||
func startUDPServer(done chan struct{}, fn func([]byte)) (string, error) {
|
||||
addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
conn, err := net.ListenUDP("udp", addr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
i := 0
|
||||
go func() {
|
||||
defer conn.Close()
|
||||
for {
|
||||
i++
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
n, _, err := conn.ReadFromUDP(buf)
|
||||
fn(buf[:n])
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return conn.LocalAddr().String(), nil
|
||||
}
|
||||
|
||||
func TestReportingMetrics(t *testing.T) {
|
||||
reporter := csm.Get()
|
||||
if reporter == nil {
|
||||
t.Errorf("expected non-nil reporter")
|
||||
}
|
||||
|
||||
sess := session.New()
|
||||
sess.Handlers.Clear()
|
||||
reporter.InjectHandlers(&sess.Handlers)
|
||||
|
||||
md := metadata.ClientInfo{}
|
||||
op := &request.Operation{}
|
||||
r := request.New(*sess.Config, md, sess.Handlers, client.DefaultRetryer{NumMaxRetries: 0}, op, nil, nil)
|
||||
sess.Handlers.Complete.Run(r)
|
||||
|
||||
foundAttempt := false
|
||||
foundCall := false
|
||||
|
||||
expectedMetrics := 2
|
||||
|
||||
for i := 0; i < expectedMetrics; i++ {
|
||||
m := <-csm.MetricsCh
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "Type":
|
||||
a := v.(string)
|
||||
foundCall = foundCall || a == "ApiCall"
|
||||
foundAttempt = foundAttempt || a == "ApiCallAttempt"
|
||||
|
||||
if prefix := "ApiCall"; !strings.HasPrefix(a, prefix) {
|
||||
t.Errorf("expected 'APICall' prefix, but received %q", a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !foundAttempt {
|
||||
t.Errorf("expected attempt event to have occurred")
|
||||
}
|
||||
|
||||
if !foundCall {
|
||||
t.Errorf("expected call event to have occurred")
|
||||
}
|
||||
}
|
||||
|
||||
type mockService struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
type input struct{}
|
||||
type output struct{}
|
||||
|
||||
func (s *mockService) Request(i input) *request.Request {
|
||||
op := &request.Operation{
|
||||
Name: "foo",
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
}
|
||||
|
||||
o := output{}
|
||||
req := s.NewRequest(op, &i, &o)
|
||||
return req
|
||||
}
|
||||
|
||||
func BenchmarkWithCSM(b *testing.B) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("{}"))
	}))

	cfg := aws.Config{
		Endpoint: aws.String(server.URL),
	}

	sess := session.New(&cfg)
	r := csm.Get()

	r.InjectHandlers(&sess.Handlers)

	c := sess.ClientConfig("id", &cfg)

	svc := mockService{
		client.New(
			*c.Config,
			metadata.ClientInfo{
				ServiceName:   "service",
				ServiceID:     "id",
				SigningName:   "signing",
				SigningRegion: "region",
				Endpoint:      server.URL,
				APIVersion:    "0",
				JSONVersion:   "1.1",
				TargetPrefix:  "prefix",
			},
			c.Handlers,
		),
	}

	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)

	for i := 0; i < b.N; i++ {
		req := svc.Request(input{})
		req.Send()
	}
}

func BenchmarkWithCSMNoUDPConnection(b *testing.B) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("{}"))
	}))

	cfg := aws.Config{
		Endpoint: aws.String(server.URL),
	}

	sess := session.New(&cfg)
	r := csm.Get()
	r.Pause()
	r.InjectHandlers(&sess.Handlers)
	defer r.Pause()

	c := sess.ClientConfig("id", &cfg)

	svc := mockService{
		client.New(
			*c.Config,
			metadata.ClientInfo{
				ServiceName:   "service",
				ServiceID:     "id",
				SigningName:   "signing",
				SigningRegion: "region",
				Endpoint:      server.URL,
				APIVersion:    "0",
				JSONVersion:   "1.1",
				TargetPrefix:  "prefix",
			},
			c.Handlers,
		),
	}

	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)

	for i := 0; i < b.N; i++ {
		req := svc.Request(input{})
		req.Send()
	}
}

func BenchmarkWithoutCSM(b *testing.B) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("{}"))
	}))

	cfg := aws.Config{
		Endpoint: aws.String(server.URL),
	}
	sess := session.New(&cfg)
	c := sess.ClientConfig("id", &cfg)

	svc := mockService{
		client.New(
			*c.Config,
			metadata.ClientInfo{
				ServiceName:   "service",
				ServiceID:     "id",
				SigningName:   "signing",
				SigningRegion: "region",
				Endpoint:      server.URL,
				APIVersion:    "0",
				JSONVersion:   "1.1",
				TargetPrefix:  "prefix",
			},
			c.Handlers,
		),
	}

	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)

	for i := 0; i < b.N; i++ {
		req := svc.Request(input{})
		req.Send()
	}
}

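Taken together, the three benchmarks isolate the cost of the CSM handlers: BenchmarkWithCSM with a live local UDP reporter, BenchmarkWithCSMNoUDPConnection with the reporter paused, and BenchmarkWithoutCSM as the baseline. Assuming a checkout of the SDK, something like go test -run NONE -bench . ./aws/csm/ (standard go test flags, nothing SDK-specific) would produce the comparison.
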
86 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
@@ -48,6 +48,7 @@ const (
	A4bServiceID                    = "a4b"                     // A4b.
	AcmServiceID                    = "acm"                     // Acm.
	AcmPcaServiceID                 = "acm-pca"                 // AcmPca.
	ApiMediatailorServiceID         = "api.mediatailor"         // ApiMediatailor.
	ApiPricingServiceID             = "api.pricing"             // ApiPricing.
	ApigatewayServiceID             = "apigateway"              // Apigateway.
	ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
@@ -130,6 +131,7 @@ const (
	ModelsLexServiceID      = "models.lex"      // ModelsLex.
	MonitoringServiceID     = "monitoring"      // Monitoring.
	MturkRequesterServiceID = "mturk-requester" // MturkRequester.
	NeptuneServiceID        = "neptune"         // Neptune.
	OpsworksServiceID       = "opsworks"        // Opsworks.
	OpsworksCmServiceID     = "opsworks-cm"     // OpsworksCm.
	OrganizationsServiceID  = "organizations"   // Organizations.
@@ -307,6 +309,16 @@ var awsPartition = partition{
			"us-west-2": endpoint{},
		},
	},
	"api.mediatailor": service{

		Endpoints: endpoints{
			"ap-northeast-1": endpoint{},
			"ap-southeast-1": endpoint{},
			"ap-southeast-2": endpoint{},
			"eu-west-1":      endpoint{},
			"us-east-1":      endpoint{},
		},
	},
	"api.pricing": service{
		Defaults: endpoint{
			CredentialScope: credentialScope{
@@ -434,6 +446,7 @@ var awsPartition = partition{
		Endpoints: endpoints{
			"ap-northeast-1": endpoint{},
			"ap-northeast-2": endpoint{},
			"ap-south-1":     endpoint{},
			"ap-southeast-1": endpoint{},
			"ap-southeast-2": endpoint{},
			"ca-central-1":   endpoint{},
@@ -558,6 +571,7 @@ var awsPartition = partition{
			"ca-central-1": endpoint{},
			"eu-central-1": endpoint{},
			"eu-west-1":    endpoint{},
			"eu-west-2":    endpoint{},
			"us-east-1":    endpoint{},
			"us-east-2":    endpoint{},
			"us-west-1":    endpoint{},
@@ -1046,6 +1060,7 @@ var awsPartition = partition{
	"elasticfilesystem": service{

		Endpoints: endpoints{
			"ap-northeast-2": endpoint{},
			"ap-southeast-2": endpoint{},
			"eu-central-1":   endpoint{},
			"eu-west-1":      endpoint{},
@@ -1179,8 +1194,10 @@ var awsPartition = partition{

		Endpoints: endpoints{
			"ap-northeast-1": endpoint{},
			"ap-northeast-2": endpoint{},
			"ap-southeast-1": endpoint{},
			"ap-southeast-2": endpoint{},
			"ca-central-1":   endpoint{},
			"eu-central-1":   endpoint{},
			"eu-west-1":      endpoint{},
			"us-east-1":      endpoint{},
@@ -1194,6 +1211,7 @@ var awsPartition = partition{
			Protocols: []string{"https"},
		},
		Endpoints: endpoints{
			"eu-west-1": endpoint{},
			"us-east-1": endpoint{},
			"us-west-2": endpoint{},
		},
@@ -1242,11 +1260,13 @@ var awsPartition = partition{

		Endpoints: endpoints{
			"ap-northeast-1": endpoint{},
			"ap-northeast-2": endpoint{},
			"ap-south-1":     endpoint{},
			"ap-southeast-1": endpoint{},
			"ap-southeast-2": endpoint{},
			"eu-central-1":   endpoint{},
			"eu-west-1":      endpoint{},
			"eu-west-2":      endpoint{},
			"us-east-1":      endpoint{},
			"us-east-2":      endpoint{},
			"us-west-2":      endpoint{},
@@ -1509,8 +1529,10 @@ var awsPartition = partition{

		Endpoints: endpoints{
			"ap-northeast-1": endpoint{},
			"ap-northeast-2": endpoint{},
			"ap-southeast-1": endpoint{},
			"ap-southeast-2": endpoint{},
			"eu-central-1":   endpoint{},
			"eu-west-1":      endpoint{},
			"us-east-1":      endpoint{},
			"us-west-2":      endpoint{},
@@ -1622,6 +1644,35 @@ var awsPartition = partition{
			"us-east-1": endpoint{},
		},
	},
	"neptune": service{

		Endpoints: endpoints{
			"eu-west-1": endpoint{
				Hostname: "rds.eu-west-1.amazonaws.com",
				CredentialScope: credentialScope{
					Region: "eu-west-1",
				},
			},
			"us-east-1": endpoint{
				Hostname: "rds.us-east-1.amazonaws.com",
				CredentialScope: credentialScope{
					Region: "us-east-1",
				},
			},
			"us-east-2": endpoint{
				Hostname: "rds.us-east-2.amazonaws.com",
				CredentialScope: credentialScope{
					Region: "us-east-2",
				},
			},
			"us-west-2": endpoint{
				Hostname: "rds.us-west-2.amazonaws.com",
				CredentialScope: credentialScope{
					Region: "us-west-2",
				},
			},
		},
	},
	"opsworks": service{

		Endpoints: endpoints{
@@ -1805,10 +1856,11 @@ var awsPartition = partition{
	"runtime.sagemaker": service{

		Endpoints: endpoints{
			"eu-west-1": endpoint{},
			"us-east-1": endpoint{},
			"us-east-2": endpoint{},
			"us-west-2": endpoint{},
			"ap-northeast-1": endpoint{},
			"eu-west-1":      endpoint{},
			"us-east-1":      endpoint{},
			"us-east-2":      endpoint{},
			"us-west-2":      endpoint{},
		},
	},
	"s3": service{
@@ -1873,10 +1925,11 @@ var awsPartition = partition{
	"sagemaker": service{

		Endpoints: endpoints{
			"eu-west-1": endpoint{},
			"us-east-1": endpoint{},
			"us-east-2": endpoint{},
			"us-west-2": endpoint{},
			"ap-northeast-1": endpoint{},
			"eu-west-1":      endpoint{},
			"us-east-1":      endpoint{},
			"us-east-2":      endpoint{},
			"us-west-2":      endpoint{},
		},
	},
	"sdb": service{
@@ -1991,6 +2044,7 @@ var awsPartition = partition{
			"eu-west-1": endpoint{},
			"us-east-1": endpoint{},
			"us-east-2": endpoint{},
			"us-west-1": endpoint{},
			"us-west-2": endpoint{},
		},
	},
@@ -2081,6 +2135,10 @@ var awsPartition = partition{
			"eu-west-1":      endpoint{},
			"eu-west-2":      endpoint{},
			"eu-west-3":      endpoint{},
			"fips-us-east-1": endpoint{},
			"fips-us-east-2": endpoint{},
			"fips-us-west-1": endpoint{},
			"fips-us-west-2": endpoint{},
			"sa-east-1":      endpoint{},
			"us-east-1": endpoint{
				SSLCommonName: "queue.{dnsSuffix}",
@@ -2507,13 +2565,15 @@ var awscnPartition = partition{
	"ecr": service{

		Endpoints: endpoints{
			"cn-north-1": endpoint{},
			"cn-north-1":     endpoint{},
			"cn-northwest-1": endpoint{},
		},
	},
	"ecs": service{

		Endpoints: endpoints{
			"cn-north-1": endpoint{},
			"cn-north-1":     endpoint{},
			"cn-northwest-1": endpoint{},
		},
	},
	"elasticache": service{
@@ -2927,6 +2987,12 @@ var awsusgovPartition = partition{
			},
		},
	},
	"inspector": service{

		Endpoints: endpoints{
			"us-gov-west-1": endpoint{},
		},
	},
	"kinesis": service{

		Endpoints: endpoints{

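The hunks above register new service identifiers (api.mediatailor, neptune) and extra regions for existing services. A minimal sketch of resolving one of the new entries through the SDK's endpoint table; the service/region pair is only an example:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve the neptune entry added above; the resolver should return the
	// rds.* hostname and signing region declared in defaults.go.
	e, err := endpoints.DefaultResolver().EndpointFor("neptune", "us-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(e.URL, e.SigningRegion)
}
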
6 vendor/github.com/aws/aws-sdk-go/aws/logger.go generated vendored
@@ -71,6 +71,12 @@ const (
	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors

	// LogDebugWithEventStreamBody states the SDK should log EventStream
	// request and response bodies. This should be used to log the EventStream
	// wire unmarshaled message content of requests and responses made while
	// using the SDK. Will also enable LogDebug.
	LogDebugWithEventStreamBody
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should

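The new flag composes with the existing LogLevelType bit masks. A hedged sketch of opting into it on a config; the combination chosen here is illustrative:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// LogLevel values are bit flags, so they can be OR'd together.
	cfg := aws.NewConfig().
		WithLogLevel(aws.LogDebugWithEventStreamBody | aws.LogDebugWithRequestErrors)

	sess := session.Must(session.NewSession(cfg))
	_ = sess // pass sess to a service client; matching debug lines go to the configured Logger
}
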
18 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go generated vendored
@@ -14,6 +14,7 @@ type Handlers struct {
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalStream  HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
@@ -30,6 +31,7 @@ func (h *Handlers) Copy() Handlers {
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalStream:  h.UnmarshalStream.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
@@ -45,6 +47,7 @@ func (h *Handlers) Clear() {
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalStream.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
@@ -172,6 +175,21 @@ func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
	return swapped
}

// Swap will replace all handlers in the list matching the name passed in
// with the given replacement handler. True is returned if any handlers were swapped.
func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
	var swapped bool

	for i := 0; i < len(l.list); i++ {
		if l.list[i].Name == name {
			l.list[i] = replace
			swapped = true
		}
	}

	return swapped
}

// SetBackNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the end of the list.
func (l *HandlerList) SetBackNamed(n NamedHandler) {

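The new Swap method replaces every handler registered under a given name. A hedged sketch of swapping out the SDK's core send handler, as a test might; the no-op replacement function is illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList
	l.PushBackNamed(corehandlers.SendHandler)

	stub := request.NamedHandler{
		Name: corehandlers.SendHandler.Name,
		Fn:   func(r *request.Request) { /* short-circuit: no network call */ },
	}
	if l.Swap(corehandlers.SendHandler.Name, stub) {
		fmt.Println("send handler replaced")
	}
}
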
9 vendor/github.com/aws/aws-sdk-go/aws/request/request.go generated vendored
@@ -46,6 +46,7 @@ type Request struct {
	Handlers Handlers

	Retryer
	AttemptTime time.Time
	Time        time.Time
	Operation   *Operation
	HTTPRequest *http.Request
@@ -121,6 +122,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
		Handlers: handlers.Copy(),

		Retryer:     retryer,
		AttemptTime: time.Now(),
		Time:        time.Now(),
		ExpireTime:  0,
		Operation:   operation,
@@ -368,9 +370,9 @@ func (r *Request) Build() error {
	return r.Error
}

// Sign will sign the request returning error if errors are encountered.
// Sign will sign the request, returning error if errors are encountered.
//
// Send will build the request prior to signing. All Sign Handlers will
// Sign will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
func (r *Request) Sign() error {
	r.Build()
@@ -440,7 +442,7 @@ func (r *Request) GetBody() io.ReadSeeker {
	return r.safeBody
}

// Send will send the request returning error if errors are encountered.
// Send will send the request, returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
@@ -461,6 +463,7 @@ func (r *Request) Send() error {
	}()

	for {
		r.AttemptTime = time.Now()
		if aws.BoolValue(r.Retryable) {
			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",

2 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go generated vendored
@@ -21,7 +21,7 @@ func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
var NoBody = noBody{}

// ResetBody rewinds the request body back to its starting position, and
// set's the HTTP Request body reference. When the body is read prior
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if

2 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go generated vendored
@@ -11,7 +11,7 @@ import (
var NoBody = http.NoBody

// ResetBody rewinds the request body back to its starting position, and
// set's the HTTP Request body reference. When the body is read prior
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
//
// ResetBody will automatically be called by the SDK's build handler, but if

15 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go generated vendored
@@ -35,8 +35,12 @@ type Pagination struct {
	// NewRequest should always be built from the same API operations. It is
	// undefined if different API operations are returned on subsequent calls.
	NewRequest func() (*Request, error)
	// EndPageOnSameToken, when enabled, will allow the paginator to stop on
	// tokens that are the same as its previous tokens.
	EndPageOnSameToken bool

	started    bool
	prevTokens []interface{}
	nextTokens []interface{}

	err error
@@ -49,7 +53,15 @@ type Pagination struct {
//
// Will always return true if Next has not been called yet.
func (p *Pagination) HasNextPage() bool {
	return !(p.started && len(p.nextTokens) == 0)
	if !p.started {
		return true
	}

	hasNextPage := len(p.nextTokens) != 0
	if p.EndPageOnSameToken {
		return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
	}
	return hasNextPage
}

// Err returns the error Pagination encountered when retrieving the next page.
@@ -96,6 +108,7 @@ func (p *Pagination) Next() bool {
		return false
	}

	p.prevTokens = p.nextTokens
	p.nextTokens = req.nextPageTokens()
	p.curPage = req.Data

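EndPageOnSameToken changes when a standalone paginator decides it is done: with it set, a page whose next token equals the previous one ends iteration instead of looping forever. A sketch of driving request.Pagination by hand, modeled on the test below; the S3 client and bucket name are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	p := request.Pagination{
		EndPageOnSameToken: true, // end paging if the service repeats a token
		NewRequest: func() (*request.Request, error) {
			req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
				Bucket: aws.String("my-bucket"), // illustrative bucket name
			})
			return req, nil
		},
	}

	for p.Next() {
		page := p.Page().(*s3.ListObjectsOutput)
		fmt.Println("objects in page:", len(page.Contents))
	}
	if err := p.Err(); err != nil {
		fmt.Println("paging error:", err)
	}
}
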
45 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination_test.go generated vendored
@@ -466,20 +466,48 @@ func TestPagination_Standalone(t *testing.T) {
		Value, PrevToken, NextToken *string
	}

	cases := [][]testCase{
	type testCaseList struct {
		StopOnSameToken bool
		Cases           []testCase
	}

	cases := []testCaseList{
		{
			testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
			testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
			testCase{aws.String("ThirdValue"), aws.String("SecondToken"), nil},
			Cases: []testCase{
				testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
				testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
				testCase{aws.String("ThirdValue"), aws.String("SecondToken"), nil},
			},
			StopOnSameToken: false,
		},
		{
			testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
			testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
			testCase{aws.String("ThirdValue"), aws.String("SecondToken"), aws.String("")},
			Cases: []testCase{
				testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
				testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
				testCase{aws.String("ThirdValue"), aws.String("SecondToken"), aws.String("")},
			},
			StopOnSameToken: false,
		},
		{
			Cases: []testCase{
				testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
				testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
				testCase{nil, aws.String("SecondToken"), aws.String("SecondToken")},
			},
			StopOnSameToken: true,
		},
		{
			Cases: []testCase{
				testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
				testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
				testCase{aws.String("SecondValue"), aws.String("SecondToken"), aws.String("SecondToken")},
			},
			StopOnSameToken: true,
		},
	}

	for _, c := range cases {
	for _, testcase := range cases {
		c := testcase.Cases
		input := testPageInput{
			NextToken: c[0].PrevToken,
		}
@@ -487,6 +515,7 @@ func TestPagination_Standalone(t *testing.T) {
		svc := awstesting.NewClient()
		i := 0
		p := request.Pagination{
			EndPageOnSameToken: testcase.StopOnSameToken,
			NewRequest: func() (*request.Request, error) {
				r := svc.NewRequest(
					&request.Operation{

20 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go generated vendored
@@ -96,9 +96,23 @@ type envConfig struct {
	//
	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
	CustomCABundle string

	csmEnabled  string
	CSMEnabled  bool
	CSMPort     string
	CSMClientID string
}

var (
	csmEnabledEnvKey = []string{
		"AWS_CSM_ENABLED",
	}
	csmPortEnvKey = []string{
		"AWS_CSM_PORT",
	}
	csmClientIDEnvKey = []string{
		"AWS_CSM_CLIENT_ID",
	}
	credAccessEnvKey = []string{
		"AWS_ACCESS_KEY_ID",
		"AWS_ACCESS_KEY",
@@ -157,6 +171,12 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)

	// CSM environment variables
	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
	cfg.CSMEnabled = len(cfg.csmEnabled) > 0

	// Require logical grouping of credentials
	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
		cfg.Creds = credentials.Value{}

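With these keys wired into envConfigLoad, client-side monitoring can be switched on without code changes. A sketch of setting the three variables from Go before the session is created; the values are examples:

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Any non-empty value enables CSM; port and client ID are optional.
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_PORT", "31000")       // the default port is used when unset
	os.Setenv("AWS_CSM_CLIENT_ID", "my-app") // example identifier

	// Session creation reads the variables and injects the CSM handlers.
	sess := session.Must(session.NewSession())
	_ = sess
}
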
26 vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -15,6 +15,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/request"
@@ -81,10 +82,16 @@ func New(cfgs ...*aws.Config) *Session {
				r.Error = err
			})
		}

		return s
	}

	return deprecatedNewSession(cfgs...)
	s := deprecatedNewSession(cfgs...)
	if envCfg.CSMEnabled {
		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
	}

	return s
}

// NewSession returns a new Session created from SDK defaults, config files,
@@ -300,10 +307,22 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
	}

	initHandlers(s)

	return s
}

func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
	logger.Log("Enabling CSM")
	if len(port) == 0 {
		port = csm.DefaultPort
	}

	r, err := csm.Start(clientID, "127.0.0.1:"+port)
	if err != nil {
		return
	}
	r.InjectHandlers(handlers)
}

func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
	cfg := defaults.Config()
	handlers := defaults.Handlers()
@@ -343,6 +362,9 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
	}

	initHandlers(s)
	if envCfg.CSMEnabled {
		enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
	}

	// Setup HTTP client with custom cert bundle if enabled
	if opts.CustomCABundle != nil {

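The same machinery enableCSM wraps is also reachable directly through the csm package. A minimal sketch of starting the reporter by hand and attaching it to a session; the client ID and address are examples:

package main

import (
	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Start the reporter; metrics are sent as UDP datagrams to this address.
	r, err := csm.Start("my-client-id", "127.0.0.1:31000")
	if err != nil {
		panic(err)
	}

	sess := session.Must(session.NewSession())
	r.InjectHandlers(&sess.Handlers) // requests now emit ApiCall/ApiCallAttempt metrics
}
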
18 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/functional_test.go generated vendored
@@ -48,7 +48,7 @@ func TestPresignHandler(t *testing.T) {
	expectedHost := "bucket.s3.mock-region.amazonaws.com"
	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-disposition;host;x-amz-acl"
	expectedSig := "a46583256431b09eb45ba4af2e6286d96a9835ed13721023dc8076dfdcb90fcb"
	expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b"
	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"

	u, _ := url.Parse(urlstr)
@@ -71,8 +71,8 @@ func TestPresignHandler(t *testing.T) {
	if e, a := "300", urlQ.Get("X-Amz-Expires"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "UNSIGNED-PAYLOAD", urlQ.Get("X-Amz-Content-Sha256"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	if a := urlQ.Get("X-Amz-Content-Sha256"); len(a) != 0 {
		t.Errorf("expect no content sha256 got %v", a)
	}

	if e, a := "+", urlstr; strings.Contains(a, e) { // + encoded as %20
@@ -98,7 +98,7 @@ func TestPresignRequest(t *testing.T) {
	expectedHost := "bucket.s3.mock-region.amazonaws.com"
	expectedDate := "19700101T000000Z"
	expectedHeaders := "content-disposition;host;x-amz-acl"
	expectedSig := "a46583256431b09eb45ba4af2e6286d96a9835ed13721023dc8076dfdcb90fcb"
	expectedSig := "2d76a414208c0eac2a23ef9c834db9635ecd5a0fbb447a00ad191f82d854f55b"
	expectedCred := "AKID/19700101/mock-region/s3/aws4_request"
	expectedHeaderMap := http.Header{
		"x-amz-acl": []string{"public-read"},
@@ -128,8 +128,8 @@ func TestPresignRequest(t *testing.T) {
	if e, a := "300", urlQ.Get("X-Amz-Expires"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	}
	if e, a := "UNSIGNED-PAYLOAD", urlQ.Get("X-Amz-Content-Sha256"); e != a {
		t.Errorf("expect %v, got %v", e, a)
	if a := urlQ.Get("X-Amz-Content-Sha256"); len(a) != 0 {
		t.Errorf("expect no content sha256 got %v", a)
	}

	if e, a := "+", urlstr; strings.Contains(a, e) { // + encoded as %20
@@ -169,7 +169,7 @@ func TestStandaloneSign_WithPort(t *testing.T) {

	cases := []struct {
		description string
		url string
		url         string
		expectedSig string
	}{
		{
@@ -213,7 +213,7 @@ func TestStandalonePresign_WithPort(t *testing.T) {

	cases := []struct {
		description string
		url string
		url         string
		expectedSig string
	}{
		{
@@ -241,7 +241,7 @@ func TestStandalonePresign_WithPort(t *testing.T) {
	for _, c := range cases {
		signer := v4.NewSigner(unit.Session.Config.Credentials)
		req, _ := http.NewRequest("GET", c.url, nil)
		_, err := signer.Presign(req, nil, "es", "us-east-1", 5 * time.Minute, time.Unix(0, 0))
		_, err := signer.Presign(req, nil, "es", "us-east-1", 5*time.Minute, time.Unix(0, 0))
		if err != nil {
			t.Fatalf("expect no error, got %v", err)
		}

13 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -135,6 +135,7 @@ var requiredSignedHeaders = rules{
		"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
		"X-Amz-Storage-Class":                           struct{}{},
		"X-Amz-Website-Redirect-Location":               struct{}{},
		"X-Amz-Content-Sha256":                          struct{}{},
	},
},
patterns{"X-Amz-Meta-"},
@@ -671,8 +672,15 @@ func (ctx *signingCtx) buildSignature() {
func (ctx *signingCtx) buildBodyDigest() error {
	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
	if hash == "" {
		if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
		includeSHA256Header := ctx.unsignedPayload ||
			ctx.ServiceName == "s3" ||
			ctx.ServiceName == "glacier"

		s3Presign := ctx.isPresign && ctx.ServiceName == "s3"

		if ctx.unsignedPayload || s3Presign {
			hash = "UNSIGNED-PAYLOAD"
			includeSHA256Header = !s3Presign
		} else if ctx.Body == nil {
			hash = emptyStringSHA256
		} else {
@@ -681,7 +689,8 @@ func (ctx *signingCtx) buildBodyDigest() error {
		}
		hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
	}
	if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {

	if includeSHA256Header {
		ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
	}
}

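The reworked buildBodyDigest keeps one decision in one place: whether the X-Amz-Content-Sha256 header is emitted alongside the computed hash. The digest itself is a plain hex-encoded SHA-256 of the payload; a small self-contained sketch of what the signer stores for a non-empty body (the payload is an example):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte("example payload")

	// For s3 and glacier the signer sets this hex digest as the
	// X-Amz-Content-Sha256 header; s3 presigned URLs use the literal
	// string "UNSIGNED-PAYLOAD" and omit the header instead.
	sum := sha256.Sum256(payload)
	fmt.Println(hex.EncodeToString(sum[:]))
}
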
Some files were not shown because too many files have changed in this diff