Update go dep (#1560)

This fix updates the vendored dependencies with `dep ensure --update`, along with the following changes:
- Removed the github.com/ugorji/go restriction in Gopkg.toml (fixes #1557)
- Added github.com/flynn/go-shlex in the Makefile (needed by Caddy; may be removed later)

This fixes #1557.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
Yong Tang 2018-02-23 12:10:34 -08:00 committed by Miek Gieben
parent 9047bdf3a0
commit 604c0045e7
563 changed files with 107278 additions and 95314 deletions

Gopkg.lock generated

@@ -16,8 +16,8 @@
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "3b1b38866a79f06deddf0487d5c27ba0697ccd65"
version = "v1.15.0"
revision = "f7be6aa2bc7b2e38edf816b08b582782194a1c02"
version = "v1.16.0"
[[projects]]
name = "github.com/apache/thrift"
@@ -44,6 +44,7 @@
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
@@ -55,8 +56,8 @@
"service/route53/route53iface",
"service/sts"
]
revision = "0cebc639926eb91b0192dae4b28bc808417e764c"
version = "v1.12.61"
revision = "f7f1ee9550cc76ceea731d99c8dbe00a0c39f66d"
version = "v1.13.4"
[[projects]]
name = "github.com/coreos/etcd"
@@ -67,8 +68,8 @@
"pkg/types",
"version"
]
revision = "fb5cd6f1c72c2b98e0c4453444cdfba306da867b"
version = "v3.2.14"
revision = "28f3f26c0e303392556035b694f75768d449d33d"
version = "v3.3.1"
[[projects]]
name = "github.com/coreos/go-semver"
@@ -112,14 +113,14 @@
".",
"log"
]
revision = "2dd44038f0b95ae693b266c5f87593b5d2fdd78d"
version = "v2.5.0"
revision = "26b41036311f2da8242db402557a0dbd09dc83da"
version = "v2.6.0"
[[projects]]
branch = "master"
name = "github.com/farsightsec/golang-framestream"
packages = ["."]
revision = "dec85654e8b8cf6712870afb14ee53d1c98cd5e2"
revision = "c06a5734334d9629b3db143d74b47eb94ea68612"
[[projects]]
name = "github.com/ghodss/yaml"
@@ -155,13 +156,13 @@
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "fa03337d7da5735229ee8f5e9d5d0b996014b7f8"
revision = "1de3e0542de65ad8d75452a595886fdd0befb363"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "84f4bee7c0a6db40e3166044c7983c1c32125429"
revision = "0d03ad0b6405ada874d59d97c416b5cf4234e154"
[[projects]]
name = "github.com/gogo/protobuf"
@@ -169,8 +170,8 @@
"proto",
"sortkeys"
]
revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
version = "v0.5"
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
branch = "master"
@@ -179,7 +180,6 @@
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = [
"proto",
@@ -188,7 +188,8 @@
"ptypes/duration",
"ptypes/timestamp"
]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
branch = "master"
@@ -200,7 +201,7 @@
branch = "master"
name = "github.com/google/btree"
packages = ["."]
revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
branch = "master"
@@ -240,7 +241,7 @@
".",
"simplelru"
]
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
branch = "master"
@@ -251,8 +252,8 @@
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "f1ac5984e69fed03e0574a92f70c59f132616ea2"
version = "0.3.0"
revision = "163f41321a19dd09362d4c63cc2489db2015f1f4"
version = "0.3.2"
[[projects]]
name = "github.com/jmespath/go-jmespath"
@@ -262,14 +263,14 @@
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "f7279a603edee96fe7764d3de9c6ff8cf9970994"
version = "1.0.4"
revision = "0ac74bba4a81211b28e32ef260c0f16ae41f1377"
version = "1.1.1"
[[projects]]
branch = "master"
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
version = "1.0.1"
[[projects]]
branch = "master"
@@ -366,7 +367,7 @@
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "e181e095bae94582363434144c61a9653aff6e50"
revision = "8732c616f52954686704c8645fe1a9d59e9df7c1"
[[projects]]
name = "github.com/spf13/pflag"
@@ -377,13 +378,23 @@
[[projects]]
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "8c0409fcbb70099c748d71f714529204975f6c3f"
revision = "9831f2c3ac1068a78f50999a30db84270f647af6"
version = "v1.1"
[[projects]]
name = "github.com/v2pro/plz"
packages = [
"concurrent",
"reflect2"
]
revision = "10fc95fad3224a032229e59f6e7023137d82b526"
version = "0.9.1"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
revision = "49796115aa4b964c318aad4f3084fdb41e9aa067"
[[projects]]
branch = "master"
@@ -392,27 +403,13 @@
"unix",
"windows"
]
revision = "810d7000345868fc619eb81f46307107118f4ae1"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"internal/gen",
"internal/triegen",
"internal/ucd",
"transform",
"unicode/cldr",
"unicode/norm",
"width"
]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
revision = "88d2dcc510266da9f7f8c7f34e1940716cab5f5c"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "a8101f21cf983e773d0c1133ebc5424792003214"
revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7"
[[projects]]
name = "google.golang.org/grpc"
@@ -425,6 +422,7 @@
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
@@ -440,8 +438,8 @@
"tap",
"transport"
]
revision = "7cea4cc846bcf00cbb27595b07da5de875ef7de9"
version = "v1.9.1"
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
[[projects]]
name = "gopkg.in/inf.v0"
@@ -593,11 +591,11 @@
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
revision = "a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3"
revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "eb43278f9f3825b0fa2ba07fd7ecd66fa9741b62bb276f7246db83bc53a39d84"
inputs-digest = "e302488e45c3f12eae09558fb7c71cf495826866cb5b764745c10fffcb325537"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -2,6 +2,7 @@ ignored = [
"github.com/mholt/caddy",
"github.com/mholt/caddy/caddyfile",
"github.com/mholt/caddy/startupshutdown",
"github.com/mholt/caddy/onevent",
"github.com/miekg/dns",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
@@ -13,12 +14,15 @@ ignored = [
"golang.org/x/net/internal/timeseries",
"golang.org/x/net/lex/httplex",
"golang.org/x/net/trace",
"golang.org/x/text/internal/gen",
"golang.org/x/text/internal/triegen",
"golang.org/x/text/internal/ucd",
"golang.org/x/text/transform",
"golang.org/x/text/unicode/cldr",
"golang.org/x/text/unicode/norm",
"golang.org/x/text/width",
]
[[override]]
name = "github.com/ugorji/go"
revision = "8c0409fcbb70099c748d71f714529204975f6c3f"
# client-go 6.0.0 uses apimachinery 180eddb345a5be3a157cea1c624700ad5bd27b8f
# and api 11147472b7c934c474a2c484af3c0c5210b7a3af (see Godep.json). go dep
# is unable to match Godep.json automatically so have to specify here.

Makefile

@@ -40,6 +40,8 @@ godeps:
(cd $(GOPATH)/src/github.com/prometheus/client_golang && git checkout -q v0.8.0)
(cd $(GOPATH)/src/golang.org/x/net && git checkout -q release-branch.go1.10)
(cd $(GOPATH)/src/golang.org/x/text && git checkout -q v0.3.0)
# github.com/flynn/go-shlex is required by mholt/caddy at the moment
go get -u github.com/flynn/go-shlex
.PHONY: travis
travis: check

vendor/github.com/Shopify/sarama/.travis.yml generated vendored

@@ -1,6 +1,5 @@
language: go
go:
- 1.7.x
- 1.8.x
- 1.9.x
@@ -22,8 +21,7 @@ before_install:
- vagrant/boot_cluster.sh
- vagrant/create_topics.sh
install:
- make install_dependencies
install: make install_dependencies
script:
- make test
@@ -32,6 +30,6 @@ script:
- make fmt
after_success:
- bash <(curl -s https://codecov.io/bash)
- bash <(curl -s https://codecov.io/bash)
sudo: false
after_script: vagrant/halt_cluster.sh

vendor/github.com/Shopify/sarama/CHANGELOG.md generated vendored

@@ -1,5 +1,47 @@
# Changelog
#### Version 1.16.0 (2018-02-12)
New Features:
- Add support for the Create/Delete Topics request/response pairs
([#1007](https://github.com/Shopify/sarama/pull/1007),
[#1008](https://github.com/Shopify/sarama/pull/1008)).
- Add support for the Describe/Create/Delete ACL request/response pairs
([#1009](https://github.com/Shopify/sarama/pull/1009)).
- Add support for the five transaction-related request/response pairs
([#1016](https://github.com/Shopify/sarama/pull/1016)).
Improvements:
- Permit setting version on mock producer responses
([#999](https://github.com/Shopify/sarama/pull/999)).
- Add `NewMockBrokerListener` helper for testing TLS connections
([#1019](https://github.com/Shopify/sarama/pull/1019)).
- Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
which results in much higher throughput in most cases
([#1024](https://github.com/Shopify/sarama/pull/1024)).
- Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
reduce CPU and memory usage when processing many partitions
([#1028](https://github.com/Shopify/sarama/pull/1028)).
- Assign relative offsets to messages in the producer to save the brokers a
recompression pass
([#1002](https://github.com/Shopify/sarama/pull/1002),
[#1015](https://github.com/Shopify/sarama/pull/1015)).
Bug Fixes:
- Fix producing uncompressed batches with the new protocol format
([#1032](https://github.com/Shopify/sarama/issues/1032)).
- Fix consuming compacted topics with the new protocol format
([#1005](https://github.com/Shopify/sarama/issues/1005)).
- Fix consuming topics with a mix of protocol formats
([#1021](https://github.com/Shopify/sarama/issues/1021)).
- Fix consuming when the broker includes multiple batches in a single response
([#1022](https://github.com/Shopify/sarama/issues/1022)).
- Fix detection of `PartialTrailingMessage` when the partial message was
truncated before the magic value indicating its version
([#1030](https://github.com/Shopify/sarama/pull/1030)).
- Fix expectation-checking in the mock of `SyncProducer.SendMessages`
([#1035](https://github.com/Shopify/sarama/pull/1035)).
#### Version 1.15.0 (2017-12-08)
New Features:

vendor/github.com/Shopify/sarama/README.md generated vendored

@@ -21,7 +21,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are
Go 1.9 and 1.8, and Kafka 1.0 through 0.10, although older releases are
still likely to work.
Sarama follows semantic versioning and provides API stability via the gopkg.in service.

vendor/github.com/Shopify/sarama/acl_bindings.go generated vendored Normal file

@@ -0,0 +1,119 @@
package sarama
type Resource struct {
ResourceType AclResourceType
ResourceName string
}
func (r *Resource) encode(pe packetEncoder) error {
pe.putInt8(int8(r.ResourceType))
if err := pe.putString(r.ResourceName); err != nil {
return err
}
return nil
}
func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
r.ResourceType = AclResourceType(resourceType)
if r.ResourceName, err = pd.getString(); err != nil {
return err
}
return nil
}
type Acl struct {
Principal string
Host string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *Acl) encode(pe packetEncoder) error {
if err := pe.putString(a.Principal); err != nil {
return err
}
if err := pe.putString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
if a.Principal, err = pd.getString(); err != nil {
return err
}
if a.Host, err = pd.getString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}
type ResourceAcls struct {
Resource
Acls []*Acl
}
func (r *ResourceAcls) encode(pe packetEncoder) error {
if err := r.Resource.encode(pe); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Acls)); err != nil {
return err
}
for _, acl := range r.Acls {
if err := acl.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
if err := r.Resource.decode(pd, version); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Acls = make([]*Acl, n)
for i := 0; i < n; i++ {
r.Acls[i] = new(Acl)
if err := r.Acls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_request.go generated vendored Normal file

@@ -0,0 +1,76 @@
package sarama
type CreateAclsRequest struct {
AclCreations []*AclCreation
}
func (c *CreateAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
return err
}
for _, aclCreation := range c.AclCreations {
if err := aclCreation.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreations = make([]*AclCreation, n)
for i := 0; i < n; i++ {
c.AclCreations[i] = new(AclCreation)
if err := c.AclCreations[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsRequest) key() int16 {
return 30
}
func (d *CreateAclsRequest) version() int16 {
return 0
}
func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreation struct {
Resource
Acl
}
func (a *AclCreation) encode(pe packetEncoder) error {
if err := a.Resource.encode(pe); err != nil {
return err
}
if err := a.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
if err := a.Resource.decode(pd, version); err != nil {
return err
}
if err := a.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_request_test.go generated vendored Normal file

@@ -0,0 +1,34 @@
package sarama
import "testing"
var (
aclCreateRequest = []byte{
0, 0, 0, 1,
3, // resource type = group
0, 5, 'g', 'r', 'o', 'u', 'p',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
2, // all
2, // deny
}
)
func TestCreateAclsRequest(t *testing.T) {
req := &CreateAclsRequest{
AclCreations: []*AclCreation{{
Resource: Resource{
ResourceType: AclResourceGroup,
ResourceName: "group",
},
Acl: Acl{
Principal: "principal",
Host: "host",
Operation: AclOperationAll,
PermissionType: AclPermissionDeny,
}},
},
}
testRequest(t, "create request", req, aclCreateRequest)
}

vendor/github.com/Shopify/sarama/acl_create_response.go generated vendored Normal file

@@ -0,0 +1,88 @@
package sarama
import "time"
type CreateAclsResponse struct {
ThrottleTime time.Duration
AclCreationResponses []*AclCreationResponse
}
func (c *CreateAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
return err
}
for _, aclCreationResponse := range c.AclCreationResponses {
if err := aclCreationResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.AclCreationResponses = make([]*AclCreationResponse, n)
for i := 0; i < n; i++ {
c.AclCreationResponses[i] = new(AclCreationResponse)
if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *CreateAclsResponse) key() int16 {
return 30
}
func (d *CreateAclsResponse) version() int16 {
return 0
}
func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type AclCreationResponse struct {
Err KError
ErrMsg *string
}
func (a *AclCreationResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(a.Err))
if err := pe.putNullableString(a.ErrMsg); err != nil {
return err
}
return nil
}
func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
if a.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_create_response_test.go generated vendored Normal file

@@ -0,0 +1,41 @@
package sarama
import (
"testing"
"time"
)
var (
createResponseWithError = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 42,
0, 5, 'e', 'r', 'r', 'o', 'r',
}
createResponseArray = []byte{
0, 0, 0, 100,
0, 0, 0, 2,
0, 42,
0, 5, 'e', 'r', 'r', 'o', 'r',
0, 0,
255, 255,
}
)
func TestCreateAclsResponse(t *testing.T) {
errmsg := "error"
resp := &CreateAclsResponse{
ThrottleTime: 100 * time.Millisecond,
AclCreationResponses: []*AclCreationResponse{{
Err: ErrInvalidRequest,
ErrMsg: &errmsg,
}},
}
testResponse(t, "response with error", resp, createResponseWithError)
resp.AclCreationResponses = append(resp.AclCreationResponses, new(AclCreationResponse))
testResponse(t, "response array", resp, createResponseArray)
}

vendor/github.com/Shopify/sarama/acl_delete_request.go generated vendored Normal file

@@ -0,0 +1,48 @@
package sarama
type DeleteAclsRequest struct {
Filters []*AclFilter
}
func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(d.Filters)); err != nil {
return err
}
for _, filter := range d.Filters {
if err := filter.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.Filters = make([]*AclFilter, n)
for i := 0; i < n; i++ {
d.Filters[i] = new(AclFilter)
if err := d.Filters[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsRequest) key() int16 {
return 31
}
func (d *DeleteAclsRequest) version() int16 {
return 0
}
func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/acl_delete_request_test.go generated vendored Normal file

@@ -0,0 +1,69 @@
package sarama
import "testing"
var (
aclDeleteRequestNulls = []byte{
0, 0, 0, 1,
1,
255, 255,
255, 255,
255, 255,
11,
3,
}
aclDeleteRequest = []byte{
0, 0, 0, 1,
1, // any
0, 6, 'f', 'i', 'l', 't', 'e', 'r',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
}
aclDeleteRequestArray = []byte{
0, 0, 0, 2,
1,
0, 6, 'f', 'i', 'l', 't', 'e', 'r',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
2,
0, 5, 't', 'o', 'p', 'i', 'c',
255, 255,
255, 255,
6,
2,
}
)
func TestDeleteAclsRequest(t *testing.T) {
req := &DeleteAclsRequest{
Filters: []*AclFilter{{
ResourceType: AclResourceAny,
Operation: AclOperationAlterConfigs,
PermissionType: AclPermissionAllow,
}},
}
testRequest(t, "delete request nulls", req, aclDeleteRequestNulls)
req.Filters[0].ResourceName = nullString("filter")
req.Filters[0].Principal = nullString("principal")
req.Filters[0].Host = nullString("host")
req.Filters[0].Operation = AclOperationWrite
testRequest(t, "delete request", req, aclDeleteRequest)
req.Filters = append(req.Filters, &AclFilter{
ResourceType: AclResourceTopic,
ResourceName: nullString("topic"),
Operation: AclOperationDelete,
PermissionType: AclPermissionDeny,
})
testRequest(t, "delete request array", req, aclDeleteRequestArray)
}

vendor/github.com/Shopify/sarama/acl_delete_response.go generated vendored Normal file

@@ -0,0 +1,155 @@
package sarama
import "time"
type DeleteAclsResponse struct {
ThrottleTime time.Duration
FilterResponses []*FilterResponse
}
func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
return err
}
for _, filterResponse := range a.FilterResponses {
if err := filterResponse.encode(pe); err != nil {
return err
}
}
return nil
}
func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.FilterResponses = make([]*FilterResponse, n)
for i := 0; i < n; i++ {
a.FilterResponses[i] = new(FilterResponse)
if err := a.FilterResponses[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DeleteAclsResponse) key() int16 {
return 31
}
func (d *DeleteAclsResponse) version() int16 {
return 0
}
func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type FilterResponse struct {
Err KError
ErrMsg *string
MatchingAcls []*MatchingAcl
}
func (f *FilterResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(f.Err))
if err := pe.putNullableString(f.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
return err
}
for _, matchingAcl := range f.MatchingAcls {
if err := matchingAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
f.Err = KError(kerr)
if f.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
f.MatchingAcls = make([]*MatchingAcl, n)
for i := 0; i < n; i++ {
f.MatchingAcls[i] = new(MatchingAcl)
if err := f.MatchingAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
type MatchingAcl struct {
Err KError
ErrMsg *string
Resource
Acl
}
func (m *MatchingAcl) encode(pe packetEncoder) error {
pe.putInt16(int16(m.Err))
if err := pe.putNullableString(m.ErrMsg); err != nil {
return err
}
if err := m.Resource.encode(pe); err != nil {
return err
}
if err := m.Acl.encode(pe); err != nil {
return err
}
return nil
}
func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
m.Err = KError(kerr)
if m.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
if err := m.Resource.decode(pd, version); err != nil {
return err
}
if err := m.Acl.decode(pd, version); err != nil {
return err
}
return nil
}

vendor/github.com/Shopify/sarama/acl_delete_response_test.go generated vendored Normal file

@@ -0,0 +1,38 @@
package sarama
import (
"testing"
"time"
)
var (
deleteAclsResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 0, // no error
255, 255, // no error message
0, 0, 0, 1, // 1 matching acl
0, 0, // no error
255, 255, // no error message
2, // resource type
0, 5, 't', 'o', 'p', 'i', 'c',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4,
3,
}
)
func TestDeleteAclsResponse(t *testing.T) {
resp := &DeleteAclsResponse{
ThrottleTime: 100 * time.Millisecond,
FilterResponses: []*FilterResponse{{
MatchingAcls: []*MatchingAcl{{
Resource: Resource{ResourceType: AclResourceTopic, ResourceName: "topic"},
Acl: Acl{Principal: "principal", Host: "host", Operation: AclOperationWrite, PermissionType: AclPermissionAllow},
}},
}},
}
testResponse(t, "", resp, deleteAclsResponse)
}

vendor/github.com/Shopify/sarama/acl_describe_request.go generated vendored Normal file

@@ -0,0 +1,25 @@
package sarama
type DescribeAclsRequest struct {
AclFilter
}
func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
return d.AclFilter.encode(pe)
}
func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
return d.AclFilter.decode(pd, version)
}
func (d *DescribeAclsRequest) key() int16 {
return 29
}
func (d *DescribeAclsRequest) version() int16 {
return 0
}
func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/acl_describe_request_test.go generated vendored Normal file

@@ -0,0 +1,35 @@
package sarama
import (
"testing"
)
var (
aclDescribeRequest = []byte{
2, // resource type
0, 5, 't', 'o', 'p', 'i', 'c',
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
5, // acl operation
3, // acl permission type
}
)
func TestAclDescribeRequest(t *testing.T) {
resourcename := "topic"
principal := "principal"
host := "host"
req := &DescribeAclsRequest{
AclFilter{
ResourceType: AclResourceTopic,
ResourceName: &resourcename,
Principal: &principal,
Host: &host,
Operation: AclOperationCreate,
PermissionType: AclPermissionAllow,
},
}
testRequest(t, "", req, aclDescribeRequest)
}

vendor/github.com/Shopify/sarama/acl_describe_response.go generated vendored Normal file

@@ -0,0 +1,80 @@
package sarama
import "time"
type DescribeAclsResponse struct {
ThrottleTime time.Duration
Err KError
ErrMsg *string
ResourceAcls []*ResourceAcls
}
func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
pe.putInt16(int16(d.Err))
if err := pe.putNullableString(d.ErrMsg); err != nil {
return err
}
if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
return err
}
for _, resourceAcl := range d.ResourceAcls {
if err := resourceAcl.encode(pe); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
d.Err = KError(kerr)
errmsg, err := pd.getString()
if err != nil {
return err
}
if errmsg != "" {
d.ErrMsg = &errmsg
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.ResourceAcls = make([]*ResourceAcls, n)
for i := 0; i < n; i++ {
d.ResourceAcls[i] = new(ResourceAcls)
if err := d.ResourceAcls[i].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (d *DescribeAclsResponse) key() int16 {
return 29
}
func (d *DescribeAclsResponse) version() int16 {
return 0
}
func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/acl_describe_response_test.go generated vendored Normal file

@@ -0,0 +1,45 @@
package sarama
import (
"testing"
"time"
)
var aclDescribeResponseError = []byte{
0, 0, 0, 100,
0, 8, // error
0, 5, 'e', 'r', 'r', 'o', 'r',
0, 0, 0, 1, // 1 resource
2, // cluster type
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 acl
0, 9, 'p', 'r', 'i', 'n', 'c', 'i', 'p', 'a', 'l',
0, 4, 'h', 'o', 's', 't',
4, // write
3, // allow
}
func TestAclDescribeResponse(t *testing.T) {
errmsg := "error"
resp := &DescribeAclsResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrBrokerNotAvailable,
ErrMsg: &errmsg,
ResourceAcls: []*ResourceAcls{{
Resource: Resource{
ResourceName: "topic",
ResourceType: AclResourceTopic,
},
Acls: []*Acl{
{
Principal: "principal",
Host: "host",
Operation: AclOperationWrite,
PermissionType: AclPermissionAllow,
},
},
}},
}
testResponse(t, "describe", resp, aclDescribeResponseError)
}

vendor/github.com/Shopify/sarama/acl_filter.go generated vendored Normal file

@@ -0,0 +1,61 @@
package sarama
type AclFilter struct {
ResourceType AclResourceType
ResourceName *string
Principal *string
Host *string
Operation AclOperation
PermissionType AclPermissionType
}
func (a *AclFilter) encode(pe packetEncoder) error {
pe.putInt8(int8(a.ResourceType))
if err := pe.putNullableString(a.ResourceName); err != nil {
return err
}
if err := pe.putNullableString(a.Principal); err != nil {
return err
}
if err := pe.putNullableString(a.Host); err != nil {
return err
}
pe.putInt8(int8(a.Operation))
pe.putInt8(int8(a.PermissionType))
return nil
}
func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
resourceType, err := pd.getInt8()
if err != nil {
return err
}
a.ResourceType = AclResourceType(resourceType)
if a.ResourceName, err = pd.getNullableString(); err != nil {
return err
}
if a.Principal, err = pd.getNullableString(); err != nil {
return err
}
if a.Host, err = pd.getNullableString(); err != nil {
return err
}
operation, err := pd.getInt8()
if err != nil {
return err
}
a.Operation = AclOperation(operation)
permissionType, err := pd.getInt8()
if err != nil {
return err
}
a.PermissionType = AclPermissionType(permissionType)
return nil
}

vendor/github.com/Shopify/sarama/acl_types.go generated vendored Normal file

@@ -0,0 +1,42 @@
package sarama
type AclOperation int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
const (
AclOperationUnknown AclOperation = 0
AclOperationAny AclOperation = 1
AclOperationAll AclOperation = 2
AclOperationRead AclOperation = 3
AclOperationWrite AclOperation = 4
AclOperationCreate AclOperation = 5
AclOperationDelete AclOperation = 6
AclOperationAlter AclOperation = 7
AclOperationDescribe AclOperation = 8
AclOperationClusterAction AclOperation = 9
AclOperationDescribeConfigs AclOperation = 10
AclOperationAlterConfigs AclOperation = 11
AclOperationIdempotentWrite AclOperation = 12
)
type AclPermissionType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
const (
AclPermissionUnknown AclPermissionType = 0
AclPermissionAny AclPermissionType = 1
AclPermissionDeny AclPermissionType = 2
AclPermissionAllow AclPermissionType = 3
)
type AclResourceType int
// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
const (
AclResourceUnknown AclResourceType = 0
AclResourceAny AclResourceType = 1
AclResourceTopic AclResourceType = 2
AclResourceGroup AclResourceType = 3
AclResourceCluster AclResourceType = 4
AclResourceTransactionalID AclResourceType = 5
)
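The three enums above mirror Kafka's Java-side definitions one for one and are the vocabulary for the ACL request types added elsewhere in this change. As a minimal illustrative sketch (not part of this diff), here is how they combine with `AclFilter` from acl_filter.go to match every allow-write ACL on a single topic; the `nullString` helper mirrors the unexported one in sarama's tests:

```go
package main

import "github.com/Shopify/sarama"

// nullString converts a literal into the *string the nullable filter
// fields expect; sarama's own tests use the same helper internally.
func nullString(s string) *string { return &s }

func main() {
	// Match every ACL that allows writes to the "events" topic.
	// Leaving Principal and Host nil means "match any".
	filter := sarama.AclFilter{
		ResourceType:   sarama.AclResourceTopic,
		ResourceName:   nullString("events"),
		Operation:      sarama.AclOperationWrite,
		PermissionType: sarama.AclPermissionAllow,
	}
	_ = filter // would be embedded in a DescribeAclsRequest or DeleteAclsRequest
}
```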

vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go generated vendored Normal file

@@ -0,0 +1,52 @@
package sarama
type AddOffsetsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
GroupID string
}
func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putString(a.GroupID); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.GroupID, err = pd.getString(); err != nil {
return err
}
return nil
}
func (a *AddOffsetsToTxnRequest) key() int16 {
return 25
}
func (a *AddOffsetsToTxnRequest) version() int16 {
return 0
}
func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/add_offsets_to_txn_request_test.go generated vendored Normal file

@@ -0,0 +1,23 @@
package sarama
import "testing"
var (
addOffsetsToTxnRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 0, 0, 0, 31, 64,
0, 0,
0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd',
}
)
func TestAddOffsetsToTxnRequest(t *testing.T) {
req := &AddOffsetsToTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 0,
GroupID: "groupid",
}
testRequest(t, "", req, addOffsetsToTxnRequest)
}

vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go generated vendored Normal file

@@ -0,0 +1,44 @@
package sarama
import (
"time"
)
type AddOffsetsToTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
pe.putInt16(int16(a.Err))
return nil
}
func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
a.Err = KError(kerr)
return nil
}
func (a *AddOffsetsToTxnResponse) key() int16 {
return 25
}
func (a *AddOffsetsToTxnResponse) version() int16 {
return 0
}
func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/add_offsets_to_txn_response_test.go generated vendored Normal file

@@ -0,0 +1,22 @@
package sarama
import (
"testing"
"time"
)
var (
addOffsetsToTxnResponse = []byte{
0, 0, 0, 100,
0, 47,
}
)
func TestAddOffsetsToTxnResponse(t *testing.T) {
resp := &AddOffsetsToTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrInvalidProducerEpoch,
}
testResponse(t, "", resp, addOffsetsToTxnResponse)
}

vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go generated vendored Normal file

@@ -0,0 +1,76 @@
package sarama
type AddPartitionsToTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TopicPartitions map[string][]int32
}
func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
return err
}
for topic, partitions := range a.TopicPartitions {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.TopicPartitions = make(map[string][]int32)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
a.TopicPartitions[topic] = partitions
}
return nil
}
func (a *AddPartitionsToTxnRequest) key() int16 {
return 24
}
func (a *AddPartitionsToTxnRequest) version() int16 {
return 0
}
func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/add_partitions_to_txn_request_test.go generated vendored Normal file

@@ -0,0 +1,27 @@
package sarama
import "testing"
var (
addPartitionsToTxnRequest = []byte{
0, 3, 't', 'x', 'n',
0, 0, 0, 0, 0, 0, 31, 64, // ProducerID
0, 0, 0, 0, // ProducerEpoch
0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, 0, 0, 0, 1,
}
)
func TestAddPartitionsToTxnRequest(t *testing.T) {
req := &AddPartitionsToTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 0,
TopicPartitions: map[string][]int32{
"topic": []int32{1},
},
}
testRequest(t, "", req, addPartitionsToTxnRequest)
}

vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go generated vendored Normal file

@@ -0,0 +1,108 @@
package sarama
import (
"time"
)
type AddPartitionsToTxnResponse struct {
ThrottleTime time.Duration
Errors map[string][]*PartitionError
}
func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(a.Errors)); err != nil {
return err
}
for topic, e := range a.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
a.Errors[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
a.Errors[topic][j] = new(PartitionError)
if err := a.Errors[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *AddPartitionsToTxnResponse) key() int16 {
return 24
}
func (a *AddPartitionsToTxnResponse) version() int16 {
return 0
}
func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type PartitionError struct {
Partition int32
Err KError
}
func (p *PartitionError) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt16(int16(p.Err))
return nil
}
func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
kerr, err := pd.getInt16()
if err != nil {
return err
}
p.Err = KError(kerr)
return nil
}

vendor/github.com/Shopify/sarama/add_partitions_to_txn_response_test.go generated vendored Normal file

@@ -0,0 +1,31 @@
package sarama
import (
"testing"
"time"
)
var (
addPartitionsToTxnResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1,
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition error
0, 0, 0, 2, // partition 2
0, 48, // error
}
)
func TestAddPartitionsToTxnResponse(t *testing.T) {
resp := &AddPartitionsToTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Errors: map[string][]*PartitionError{
"topic": []*PartitionError{&PartitionError{
Err: ErrInvalidTxnState,
Partition: 2,
}},
},
}
testResponse(t, "", resp, addPartitionsToTxnResponse)
}

vendor/github.com/Shopify/sarama/alter_configs_request.go generated vendored Normal file

@@ -0,0 +1,120 @@
package sarama
type AlterConfigsRequest struct {
Resources []*AlterConfigsResource
ValidateOnly bool
}
type AlterConfigsResource struct {
Type ConfigResourceType
Name string
ConfigEntries map[string]*string
}
func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(acr.Resources)); err != nil {
return err
}
for _, r := range acr.Resources {
if err := r.encode(pe); err != nil {
return err
}
}
pe.putBool(acr.ValidateOnly)
return nil
}
func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
resourceCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResource, resourceCount)
for i := range acr.Resources {
r := &AlterConfigsResource{}
err = r.decode(pd, version)
if err != nil {
return err
}
acr.Resources[i] = r
}
validateOnly, err := pd.getBool()
if err != nil {
return err
}
acr.ValidateOnly = validateOnly
return nil
}
func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
pe.putInt8(int8(ac.Type))
if err := pe.putString(ac.Name); err != nil {
return err
}
if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range ac.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
t, err := pd.getInt8()
if err != nil {
return err
}
ac.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
ac.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
ac.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return err
}
func (acr *AlterConfigsRequest) key() int16 {
return 33
}
func (acr *AlterConfigsRequest) version() int16 {
return 0
}
func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/alter_configs_request_test.go generated vendored Normal file

@@ -0,0 +1,91 @@
package sarama
import "testing"
var (
emptyAlterConfigsRequest = []byte{
0, 0, 0, 0, // 0 configs
0, // don't Validate
}
singleAlterConfigsRequest = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, //1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
0, // don't validate
}
doubleAlterConfigsRequest = []byte{
0, 0, 0, 2, // 2 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, //1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
2, // a topic
0, 3, 'b', 'a', 'r', // topic name: bar
0, 0, 0, 2, //2 config
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
0, 12, // 12 chars
'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
0, 4,
'1', '0', '0', '0',
0, // don't validate
}
)
func TestAlterConfigsRequest(t *testing.T) {
var request *AlterConfigsRequest
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{},
}
testRequest(t, "no requests", request, emptyAlterConfigsRequest)
configValue := "1000"
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{
&AlterConfigsResource{
Type: TopicResource,
Name: "foo",
ConfigEntries: map[string]*string{
"segment.ms": &configValue,
},
},
},
}
testRequest(t, "one config", request, singleAlterConfigsRequest)
request = &AlterConfigsRequest{
Resources: []*AlterConfigsResource{
&AlterConfigsResource{
Type: TopicResource,
Name: "foo",
ConfigEntries: map[string]*string{
"segment.ms": &configValue,
},
},
&AlterConfigsResource{
Type: TopicResource,
Name: "bar",
ConfigEntries: map[string]*string{
"segment.ms": &configValue,
"retention.ms": &configValue,
},
},
},
}
testRequest(t, "two configs", request, doubleAlterConfigsRequest)
}

vendor/github.com/Shopify/sarama/alter_configs_response.go generated vendored Normal file

@@ -0,0 +1,95 @@
package sarama
import "time"
type AlterConfigsResponse struct {
ThrottleTime time.Duration
Resources []*AlterConfigsResourceResponse
}
type AlterConfigsResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
}
func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(ct.Resources)); err != nil {
return err
}
for i := range ct.Resources {
pe.putInt16(ct.Resources[i].ErrorCode)
err := pe.putString(ct.Resources[i].ErrorMsg)
if err != nil {
return nil
}
pe.putInt8(int8(ct.Resources[i].Type))
err = pe.putString(ct.Resources[i].Name)
if err != nil {
return nil
}
}
return nil
}
func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
responseCount, err := pd.getArrayLength()
if err != nil {
return err
}
acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
for i := range acr.Resources {
acr.Resources[i] = new(AlterConfigsResourceResponse)
errCode, err := pd.getInt16()
if err != nil {
return err
}
acr.Resources[i].ErrorCode = errCode
e, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].ErrorMsg = e
t, err := pd.getInt8()
if err != nil {
return err
}
acr.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
acr.Resources[i].Name = name
}
return nil
}
func (r *AlterConfigsResponse) key() int16 {
return 32
}
func (r *AlterConfigsResponse) version() int16 {
return 0
}
func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

vendor/github.com/Shopify/sarama/alter_configs_response_test.go generated vendored Normal file

@@ -0,0 +1,45 @@
package sarama
import (
"testing"
)
var (
alterResponseEmpty = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 0, // no configs
}
alterResponsePopulated = []byte{
0, 0, 0, 0, //throttle
0, 0, 0, 1, // response
0, 0, //errorcode
0, 0, //string
2, // topic
0, 3, 'f', 'o', 'o',
}
)
func TestAlterConfigsResponse(t *testing.T) {
var response *AlterConfigsResponse
response = &AlterConfigsResponse{
Resources: []*AlterConfigsResourceResponse{},
}
testVersionDecodable(t, "empty", response, alterResponseEmpty, 0)
if len(response.Resources) != 0 {
t.Error("Expected no groups")
}
response = &AlterConfigsResponse{
Resources: []*AlterConfigsResourceResponse{
&AlterConfigsResourceResponse{
ErrorCode: 0,
ErrorMsg: "",
Type: TopicResource,
Name: "foo",
},
},
}
testResponse(t, "response with error", response, alterResponsePopulated)
}

vendor/github.com/Shopify/sarama/async_producer_test.go generated vendored

@@ -639,6 +639,7 @@ func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
leader.SetHandlerByMap(map[string]MockResponse{
"ProduceRequest": NewMockProduceResponse(t).
SetVersion(0).
SetError("my_topic", 0, ErrNoError),
})

vendor/github.com/Shopify/sarama/broker.go generated vendored

@@ -373,6 +373,137 @@ func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse,
return response, nil
}
func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
response := new(CreateTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
response := new(DeleteTopicsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
response := new(DescribeAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
response := new(CreateAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
response := new(DeleteAclsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
response := new(InitProducerIDResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
response := new(AddPartitionsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
response := new(AddOffsetsToTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
response := new(EndTxnResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
response := new(TxnOffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
response := new(DescribeConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
response := new(AlterConfigsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()

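Every new exported method above is the same thin wrapper over `sendAndReceive`, so driving the admin API needs only an open `Broker` and a `Config.Version` that satisfies the request's requiredVersion. A minimal sketch, assuming a reachable broker at a placeholder address:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // placeholder address
	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0 // the ACL APIs require Kafka 0.11+
	if err := broker.Open(conf); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// List every topic ACL; DescribeAclsRequest simply embeds an AclFilter.
	resp, err := broker.DescribeAcls(&sarama.DescribeAclsRequest{
		AclFilter: sarama.AclFilter{
			ResourceType:   sarama.AclResourceTopic,
			Operation:      sarama.AclOperationAny,
			PermissionType: sarama.AclPermissionAny,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, acls := range resp.ResourceAcls {
		log.Printf("%s: %d ACLs", acls.ResourceName, len(acls.Acls))
	}
}
```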
vendor/github.com/Shopify/sarama/client_tls_test.go generated vendored Normal file

@@ -0,0 +1,206 @@
package sarama
import (
"math/big"
"net"
"testing"
"time"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
)
func TestTLS(t *testing.T) {
cakey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
clientkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
hostkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
t.Fatal(err)
}
nvb := time.Now().Add(-1 * time.Hour)
nva := time.Now().Add(1 * time.Hour)
caTemplate := &x509.Certificate{
Subject: pkix.Name{CommonName: "ca"},
Issuer: pkix.Name{CommonName: "ca"},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
IsCA: true,
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageCertSign,
}
caDer, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &cakey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
caFinalCert, err := x509.ParseCertificate(caDer)
if err != nil {
t.Fatal(err)
}
hostDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
Subject: pkix.Name{CommonName: "host"},
Issuer: pkix.Name{CommonName: "ca"},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1)},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}, caFinalCert, &hostkey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
clientDer, err := x509.CreateCertificate(rand.Reader, &x509.Certificate{
Subject: pkix.Name{CommonName: "client"},
Issuer: pkix.Name{CommonName: "ca"},
SerialNumber: big.NewInt(0),
NotAfter: nva,
NotBefore: nvb,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}, caFinalCert, &clientkey.PublicKey, cakey)
if err != nil {
t.Fatal(err)
}
pool := x509.NewCertPool()
pool.AddCert(caFinalCert)
systemCerts, err := x509.SystemCertPool()
if err != nil {
t.Fatal(err)
}
// Keep server the same - it's the client that we're testing
serverTLSConfig := &tls.Config{
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{hostDer},
PrivateKey: hostkey,
}},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: pool,
}
for _, tc := range []struct {
Succeed bool
Server, Client *tls.Config
}{
{ // Verify client fails if wrong CA cert pool is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: systemCerts,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if wrong key is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: hostkey,
}},
},
},
{ // Verify client fails if wrong cert is specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{hostDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if no CAs are specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
{ // Verify client fails if no keys are specified
Succeed: false,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
},
},
{ // Finally, verify it all works happily with client and server cert in place
Succeed: true,
Server: serverTLSConfig,
Client: &tls.Config{
RootCAs: pool,
Certificates: []tls.Certificate{tls.Certificate{
Certificate: [][]byte{clientDer},
PrivateKey: clientkey,
}},
},
},
} {
doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client)
}
}
func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientConfig *tls.Config) {
serverConfig.BuildNameToCertificate()
clientConfig.BuildNameToCertificate()
seedListener, err := tls.Listen("tcp", "127.0.0.1:0", serverConfig)
if err != nil {
t.Fatal("cannot open listener", err)
}
var childT *testing.T
if expectSuccess {
childT = t
} else {
childT = &testing.T{} // we want to swallow errors
}
seedBroker := NewMockBrokerListener(childT, 1, seedListener)
defer seedBroker.Close()
seedBroker.Returns(new(MetadataResponse))
config := NewConfig()
config.Net.TLS.Enable = true
config.Net.TLS.Config = clientConfig
client, err := NewClient([]string{seedBroker.Addr()}, config)
if err == nil {
safeClose(t, client)
}
if expectSuccess {
if err != nil {
t.Fatal(err)
}
} else {
if err == nil {
t.Fatal("expected failure")
}
}
}

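Outside the test, enabling TLS on a real client uses the same two knobs the test sets. A short sketch, assuming certificates and a CA pool are loaded elsewhere and the broker address is a placeholder:

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.TLS.Enable = true
	// Placeholder TLS settings; production code would attach client
	// certificates and RootCAs the way doListenerTLSTest does above.
	cfg.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}

	client, err := sarama.NewClient([]string{"localhost:9093"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```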
vendor/github.com/Shopify/sarama/config.go generated vendored

@@ -175,7 +175,7 @@ type Config struct {
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 32768). This should be larger than the majority of
// request (default 1MB). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
@@ -292,7 +292,7 @@ func NewConfig() *Config {
c.Producer.Return.Errors = true
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Fetch.Default = 1024 * 1024
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond

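These two hunks raise the default per-request fetch size from 32 KiB to 1 MiB (changelog item #1024 above). Consumers tuned around the old default can pin it back explicitly; a small sketch:

```go
package main

import "github.com/Shopify/sarama"

func main() {
	cfg := sarama.NewConfig()
	// As of sarama v1.16.0, Consumer.Fetch.Default is 1 MiB. Restore the
	// old 32 KiB value if larger fetches do not suit the workload.
	cfg.Consumer.Fetch.Default = 32 * 1024
	_ = cfg
}
```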
vendor/github.com/Shopify/sarama/config_resource_type.go generated vendored Normal file

@@ -0,0 +1,15 @@
package sarama
type ConfigResourceType int8
// Taken from :
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
const (
UnknownResource ConfigResourceType = 0
AnyResource ConfigResourceType = 1
TopicResource ConfigResourceType = 2
GroupResource ConfigResourceType = 3
ClusterResource ConfigResourceType = 4
BrokerResource ConfigResourceType = 5
)

vendor/github.com/Shopify/sarama/consumer.go generated vendored

@@ -441,40 +441,41 @@ func (child *partitionConsumer) HighWaterMarkOffset() int64 {
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
msgSent := false
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
firstAttempt := true
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
for i, msg := range msgs {
messageSelect:
select {
case child.messages <- msg:
msgSent = true
firstAttempt = true
case <-expiryTicker.C:
if !msgSent {
if !firstAttempt {
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
expiryTicker.Stop()
continue feederLoop
} else {
// current message has not been sent, return to select
// statement
msgSent = false
firstAttempt = false
goto messageSelect
}
}
}
expiryTicker.Stop()
child.broker.acks.Done()
}
expiryTicker.Stop()
close(child.messages)
close(child.errors)
}
@@ -523,6 +524,7 @@ func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMes
var messages []*ConsumerMessage
var incomplete bool
prelude := true
originalOffset := child.offset
for _, rec := range batch.Records {
offset := batch.FirstOffset + rec.OffsetDelta
@@ -547,9 +549,15 @@
}
}
if incomplete || len(messages) == 0 {
if incomplete {
return nil, ErrIncompleteResponse
}
child.offset = batch.FirstOffset + int64(batch.LastOffsetDelta) + 1
if child.offset <= originalOffset {
return nil, ErrConsumerOffsetNotAdvanced
}
return messages, nil
}
@@ -563,12 +571,12 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
return nil, block.Err
}
nRecs, err := block.Records.numRecords()
nRecs, err := block.numRecords()
if err != nil {
return nil, err
}
if nRecs == 0 {
partialTrailingMessage, err := block.Records.isPartial()
partialTrailingMessage, err := block.isPartial()
if err != nil {
return nil, err
}
@@ -594,14 +602,33 @@ func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*Consu
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
if control, err := block.Records.isControl(); err != nil || control {
return nil, err
messages := []*ConsumerMessage{}
for _, records := range block.RecordsSet {
if control, err := records.isControl(); err != nil || control {
continue
}
switch records.recordsType {
case legacyRecords:
messageSetMessages, err := child.parseMessages(records.msgSet)
if err != nil {
return nil, err
}
messages = append(messages, messageSetMessages...)
case defaultRecords:
recordBatchMessages, err := child.parseRecords(records.recordBatch)
if err != nil {
return nil, err
}
messages = append(messages, recordBatchMessages...)
default:
return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
}
}
if block.Records.recordsType == legacyRecords {
return child.parseMessages(block.Records.msgSet)
}
return child.parseRecords(block.Records.recordBatch)
return messages, nil
}
// brokerConsumer

vendor/github.com/Shopify/sarama/consumer_test.go generated vendored

@@ -389,10 +389,12 @@ func TestConsumerExtraOffsets(t *testing.T) {
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 2)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 3)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 4)
newFetchResponse.SetLastOffsetDelta("my_topic", 0, 4)
newFetchResponse.SetLastStableOffset("my_topic", 0, 4)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16
cfg := NewConfig()
cfg.Consumer.Return.Errors = true
if fetchResponse1.Version >= 4 {
cfg.Version = V0_11_0_0
offsetResponseVersion = 1
@@ -426,8 +428,19 @@
// Then: messages with offsets 1 and 2 are not returned even though they
// are present in the response.
assertMessageOffset(t, <-consumer.Messages(), 3)
assertMessageOffset(t, <-consumer.Messages(), 4)
select {
case msg := <-consumer.Messages():
assertMessageOffset(t, msg, 3)
case err := <-consumer.Errors():
t.Fatal(err)
}
select {
case msg := <-consumer.Messages():
assertMessageOffset(t, msg, 4)
case err := <-consumer.Errors():
t.Fatal(err)
}
safeClose(t, consumer)
safeClose(t, master)
@@ -490,6 +503,7 @@ func TestConsumerNonSequentialOffsets(t *testing.T) {
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 5)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 7)
newFetchResponse.AddRecord("my_topic", 0, nil, testMsg, 11)
newFetchResponse.SetLastOffsetDelta("my_topic", 0, 11)
newFetchResponse.SetLastStableOffset("my_topic", 0, 11)
for _, fetchResponse1 := range []*FetchResponse{legacyFetchResponse, newFetchResponse} {
var offsetResponseVersion int16

View file

@ -0,0 +1,174 @@
package sarama
import (
"time"
)
type CreateTopicsRequest struct {
Version int16
TopicDetails map[string]*TopicDetail
Timeout time.Duration
ValidateOnly bool
}
func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
return err
}
for topic, detail := range c.TopicDetails {
if err := pe.putString(topic); err != nil {
return err
}
if err := detail.encode(pe); err != nil {
return err
}
}
pe.putInt32(int32(c.Timeout / time.Millisecond))
if c.Version >= 1 {
pe.putBool(c.ValidateOnly)
}
return nil
}
func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicDetails = make(map[string]*TopicDetail, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicDetails[topic] = new(TopicDetail)
if err = c.TopicDetails[topic].decode(pd, version); err != nil {
return err
}
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
c.Timeout = time.Duration(timeout) * time.Millisecond
if version >= 1 {
c.ValidateOnly, err = pd.getBool()
if err != nil {
return err
}
c.Version = version
}
return nil
}
func (c *CreateTopicsRequest) key() int16 {
return 19
}
func (c *CreateTopicsRequest) version() int16 {
return c.Version
}
func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicDetail struct {
NumPartitions int32
ReplicationFactor int16
ReplicaAssignment map[int32][]int32
ConfigEntries map[string]*string
}
func (t *TopicDetail) encode(pe packetEncoder) error {
pe.putInt32(t.NumPartitions)
pe.putInt16(t.ReplicationFactor)
if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
return err
}
for partition, assignment := range t.ReplicaAssignment {
pe.putInt32(partition)
if err := pe.putInt32Array(assignment); err != nil {
return err
}
}
if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
return err
}
for configKey, configValue := range t.ConfigEntries {
if err := pe.putString(configKey); err != nil {
return err
}
if err := pe.putNullableString(configValue); err != nil {
return err
}
}
return nil
}
func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
if t.NumPartitions, err = pd.getInt32(); err != nil {
return err
}
if t.ReplicationFactor, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ReplicaAssignment = make(map[int32][]int32, n)
for i := 0; i < n; i++ {
replica, err := pd.getInt32()
if err != nil {
return err
}
if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
return err
}
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
if n > 0 {
t.ConfigEntries = make(map[string]*string, n)
for i := 0; i < n; i++ {
configKey, err := pd.getString()
if err != nil {
return err
}
if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
return err
}
}
}
return nil
}

View file

@ -0,0 +1,50 @@
package sarama
import (
"testing"
"time"
)
var (
createTopicsRequestV0 = []byte{
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
255, 255, 255, 255, // NumPartitions: -1
255, 255, // ReplicationFactor: -1
0, 0, 0, 1, // 1 replica assignment
0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, // partition 0 -> replicas [0, 1, 2]
0, 0, 0, 1, // 1 config
0, 12, 'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
0, 2, '-', '1', // value: "-1"
0, 0, 0, 100, // timeout: 100 ms
}
createTopicsRequestV1 = append(createTopicsRequestV0, byte(1))
)
func TestCreateTopicsRequest(t *testing.T) {
retention := "-1"
req := &CreateTopicsRequest{
TopicDetails: map[string]*TopicDetail{
"topic": {
NumPartitions: -1,
ReplicationFactor: -1,
ReplicaAssignment: map[int32][]int32{
0: []int32{0, 1, 2},
},
ConfigEntries: map[string]*string{
"retention.ms": &retention,
},
},
},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "version 0", req, createTopicsRequestV0)
req.Version = 1
req.ValidateOnly = true
testRequest(t, "version 1", req, createTopicsRequestV1)
}
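A hedged usage sketch for the new request type. The struct fields come from this diff; the topic name, counts, and retention value are made up, and how the request is actually sent to a broker is outside this diff, so the send step is omitted:

package main

import (
    "fmt"
    "time"

    "github.com/Shopify/sarama"
)

func main() {
    retention := "86400000"
    req := &sarama.CreateTopicsRequest{
        Version: 1, // v1 adds ValidateOnly and requires brokers >= 0.11 (see requiredVersion)
        TopicDetails: map[string]*sarama.TopicDetail{
            "events": {
                NumPartitions:     3,
                ReplicationFactor: 2,
                ConfigEntries:     map[string]*string{"retention.ms": &retention},
            },
        },
        Timeout:      30 * time.Second, // encoded on the wire as int32 milliseconds
        ValidateOnly: true,             // dry-run: validate without creating
    }
    fmt.Printf("%+v\n", req)
}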

View file

@ -0,0 +1,112 @@
package sarama
import "time"
type CreateTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrors map[string]*TopicError
}
func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
if c.Version >= 2 {
pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
return err
}
for topic, topicError := range c.TopicErrors {
if err := pe.putString(topic); err != nil {
return err
}
if err := topicError.encode(pe, c.Version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
c.Version = version
if version >= 2 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
c.TopicErrors = make(map[string]*TopicError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
c.TopicErrors[topic] = new(TopicError)
if err := c.TopicErrors[topic].decode(pd, version); err != nil {
return err
}
}
return nil
}
func (c *CreateTopicsResponse) key() int16 {
return 19
}
func (c *CreateTopicsResponse) version() int16 {
return c.Version
}
func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
switch c.Version {
case 2:
return V1_0_0_0
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}
type TopicError struct {
Err KError
ErrMsg *string
}
func (t *TopicError) encode(pe packetEncoder, version int16) error {
pe.putInt16(int16(t.Err))
if version >= 1 {
if err := pe.putNullableString(t.ErrMsg); err != nil {
return err
}
}
return nil
}
func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
kErr, err := pd.getInt16()
if err != nil {
return err
}
t.Err = KError(kErr)
if version >= 1 {
if t.ErrMsg, err = pd.getNullableString(); err != nil {
return err
}
}
return nil
}

View file

@ -0,0 +1,52 @@
package sarama
import (
"testing"
"time"
)
var (
createTopicsResponseV0 = []byte{
0, 0, 0, 1, // 1 topic error
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42, // error code: ErrInvalidRequest
}
createTopicsResponseV1 = []byte{
0, 0, 0, 1, // 1 topic error
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42, // error code: ErrInvalidRequest
0, 3, 'm', 's', 'g', // error message
}
createTopicsResponseV2 = []byte{
0, 0, 0, 100, // throttle time: 100 ms
0, 0, 0, 1, // 1 topic error
0, 5, 't', 'o', 'p', 'i', 'c',
0, 42, // error code: ErrInvalidRequest
0, 3, 'm', 's', 'g', // error message
}
)
func TestCreateTopicsResponse(t *testing.T) {
resp := &CreateTopicsResponse{
TopicErrors: map[string]*TopicError{
"topic": &TopicError{
Err: ErrInvalidRequest,
},
},
}
testResponse(t, "version 0", resp, createTopicsResponseV0)
resp.Version = 1
msg := "msg"
resp.TopicErrors["topic"].ErrMsg = &msg
testResponse(t, "version 1", resp, createTopicsResponseV1)
resp.Version = 2
resp.ThrottleTime = 100 * time.Millisecond
testResponse(t, "version 2", resp, createTopicsResponseV2)
}

View file

@ -0,0 +1,41 @@
package sarama
import "time"
type DeleteTopicsRequest struct {
Topics []string
Timeout time.Duration
}
func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
if err := pe.putStringArray(d.Topics); err != nil {
return err
}
pe.putInt32(int32(d.Timeout / time.Millisecond))
return nil
}
func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
if d.Topics, err = pd.getStringArray(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
d.Timeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (d *DeleteTopicsRequest) key() int16 {
return 20
}
func (d *DeleteTopicsRequest) version() int16 {
return 0
}
func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
return V0_10_1_0
}

View file

@ -0,0 +1,22 @@
package sarama
import (
"testing"
"time"
)
var deleteTopicsRequest = []byte{
0, 0, 0, 2, // 2 topics
0, 5, 't', 'o', 'p', 'i', 'c',
0, 5, 'o', 't', 'h', 'e', 'r',
0, 0, 0, 100, // timeout: 100 ms
}
func TestDeleteTopicsRequest(t *testing.T) {
req := &DeleteTopicsRequest{
Topics: []string{"topic", "other"},
Timeout: 100 * time.Millisecond,
}
testRequest(t, "", req, deleteTopicsRequest)
}
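And the matching sketch for deletion; note the Timeout is again carried on the wire as int32 milliseconds (struct from this diff, send path omitted):

package main

import (
    "fmt"
    "time"

    "github.com/Shopify/sarama"
)

func main() {
    req := &sarama.DeleteTopicsRequest{
        Topics:  []string{"topic", "other"},
        Timeout: 100 * time.Millisecond, // matches the trailing 0, 0, 0, 100 bytes above
    }
    fmt.Printf("%+v\n", req)
}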

View file

@ -0,0 +1,78 @@
package sarama
import "time"
type DeleteTopicsResponse struct {
Version int16
ThrottleTime time.Duration
TopicErrorCodes map[string]KError
}
func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
if d.Version >= 1 {
pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
}
if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
return err
}
for topic, errorCode := range d.TopicErrorCodes {
if err := pe.putString(topic); err != nil {
return err
}
pe.putInt16(int16(errorCode))
}
return nil
}
func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
if version >= 1 {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
d.Version = version
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
d.TopicErrorCodes = make(map[string]KError, n)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
errorCode, err := pd.getInt16()
if err != nil {
return err
}
d.TopicErrorCodes[topic] = KError(errorCode)
}
return nil
}
func (d *DeleteTopicsResponse) key() int16 {
return 20
}
func (d *DeleteTopicsResponse) version() int16 {
return d.Version
}
func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
switch d.Version {
case 1:
return V0_11_0_0
default:
return V0_10_1_0
}
}

View file

@ -0,0 +1,36 @@
package sarama
import (
"testing"
"time"
)
var (
deleteTopicsResponseV0 = []byte{
0, 0, 0, 1, // 1 topic error code
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, // ErrNoError
}
deleteTopicsResponseV1 = []byte{
0, 0, 0, 100, // throttle time: 100 ms
0, 0, 0, 1, // 1 topic error code
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, // ErrNoError
}
)
func TestDeleteTopicsResponse(t *testing.T) {
resp := &DeleteTopicsResponse{
TopicErrorCodes: map[string]KError{
"topic": ErrNoError,
},
}
testResponse(t, "version 0", resp, deleteTopicsResponseV0)
resp.Version = 1
resp.ThrottleTime = 100 * time.Millisecond
testResponse(t, "version 1", resp, deleteTopicsResponseV1)
}

View file

@ -0,0 +1,91 @@
package sarama
type ConfigResource struct {
Type ConfigResourceType
Name string
ConfigNames []string
}
type DescribeConfigsRequest struct {
Resources []*ConfigResource
}
func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
pe.putInt8(int8(c.Type))
if err := pe.putString(c.Name); err != nil {
return err
}
if len(c.ConfigNames) == 0 {
pe.putInt32(-1)
continue
}
if err := pe.putStringArray(c.ConfigNames); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ConfigResource, n)
for i := 0; i < n; i++ {
r.Resources[i] = &ConfigResource{}
t, err := pd.getInt8()
if err != nil {
return err
}
r.Resources[i].Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Resources[i].Name = name
confLength, err := pd.getArrayLength()
if err != nil {
return err
}
if confLength == -1 {
continue
}
cfnames := make([]string, confLength)
for i := 0; i < confLength; i++ {
s, err := pd.getString()
if err != nil {
return err
}
cfnames[i] = s
}
r.Resources[i].ConfigNames = cfnames
}
return nil
}
func (r *DescribeConfigsRequest) key() int16 {
return 32
}
func (r *DescribeConfigsRequest) version() int16 {
return 0
}
func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,90 @@
package sarama
import "testing"
var (
emptyDescribeConfigsRequest = []byte{
0, 0, 0, 0, // 0 configs
}
singleDescribeConfigsRequest = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 1, // 1 config name
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
}
doubleDescribeConfigsRequest = []byte{
0, 0, 0, 2, // 2 configs
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
0, 0, 0, 2, // 2 config names
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 12, // 12 chars
'r', 'e', 't', 'e', 'n', 't', 'i', 'o', 'n', '.', 'm', 's',
2, // a topic
0, 3, 'b', 'a', 'r', // topic name: bar
0, 0, 0, 1, // 1 config
0, 10, // 10 chars
's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
}
singleDescribeConfigsRequestAllConfigs = []byte{
0, 0, 0, 1, // 1 config
2, // a topic
0, 3, 'f', 'o', 'o', // topic name: foo
255, 255, 255, 255, // null array: fetch all configs
}
)
func TestDescribeConfigsRequest(t *testing.T) {
var request *DescribeConfigsRequest
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{},
}
testRequest(t, "no requests", request, emptyDescribeConfigsRequest)
configs := []string{"segment.ms"}
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
ConfigNames: configs,
},
},
}
testRequest(t, "one config", request, singleDescribeConfigsRequest)
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
ConfigNames: []string{"segment.ms", "retention.ms"},
},
&ConfigResource{
Type: TopicResource,
Name: "bar",
ConfigNames: []string{"segment.ms"},
},
},
}
testRequest(t, "two configs", request, doubleDescribeConfigsRequest)
request = &DescribeConfigsRequest{
Resources: []*ConfigResource{
&ConfigResource{
Type: TopicResource,
Name: "foo",
},
},
}
testRequest(t, "one topic, all configs", request, singleDescribeConfigsRequestAllConfigs)
}
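One wire-format subtlety the last case above exercises: when ConfigNames is empty, the encoder writes a null array (length -1, the 255, 255, 255, 255 bytes), which Kafka reads as "describe every config for this resource". A hedged sketch (TopicResource is the ConfigResourceType constant used in these tests; it is defined outside this diff):

package main

import (
    "fmt"

    "github.com/Shopify/sarama"
)

func main() {
    req := &sarama.DescribeConfigsRequest{
        Resources: []*sarama.ConfigResource{{
            Type: sarama.TopicResource,
            Name: "foo",
            // ConfigNames left nil -> encoded as a null array -> all configs returned
        }},
    }
    fmt.Printf("%+v\n", req)
}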

View file

@ -0,0 +1,188 @@
package sarama
import "time"
type DescribeConfigsResponse struct {
ThrottleTime time.Duration
Resources []*ResourceResponse
}
type ResourceResponse struct {
ErrorCode int16
ErrorMsg string
Type ConfigResourceType
Name string
Configs []*ConfigEntry
}
type ConfigEntry struct {
Name string
Value string
ReadOnly bool
Default bool
Sensitive bool
}
func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
if err = pe.putArrayLength(len(r.Resources)); err != nil {
return err
}
for _, c := range r.Resources {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Resources = make([]*ResourceResponse, n)
for i := 0; i < n; i++ {
rr := &ResourceResponse{}
if err := rr.decode(pd, version); err != nil {
return err
}
r.Resources[i] = rr
}
return nil
}
func (r *DescribeConfigsResponse) key() int16 {
return 32
}
func (r *DescribeConfigsResponse) version() int16 {
return 0
}
func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}
func (r *ResourceResponse) encode(pe packetEncoder) (err error) {
pe.putInt16(r.ErrorCode)
if err = pe.putString(r.ErrorMsg); err != nil {
return err
}
pe.putInt8(int8(r.Type))
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putArrayLength(len(r.Configs)); err != nil {
return err
}
for _, c := range r.Configs {
if err = c.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
ec, err := pd.getInt16()
if err != nil {
return err
}
r.ErrorCode = ec
em, err := pd.getString()
if err != nil {
return err
}
r.ErrorMsg = em
t, err := pd.getInt8()
if err != nil {
return err
}
r.Type = ConfigResourceType(t)
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Configs = make([]*ConfigEntry, n)
for i := 0; i < n; i++ {
c := &ConfigEntry{}
if err := c.decode(pd, version); err != nil {
return err
}
r.Configs[i] = c
}
return nil
}
func (r *ConfigEntry) encode(pe packetEncoder) (err error) {
if err = pe.putString(r.Name); err != nil {
return err
}
if err = pe.putString(r.Value); err != nil {
return err
}
pe.putBool(r.ReadOnly)
pe.putBool(r.Default)
pe.putBool(r.Sensitive)
return nil
}
func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
name, err := pd.getString()
if err != nil {
return err
}
r.Name = name
value, err := pd.getString()
if err != nil {
return err
}
r.Value = value
read, err := pd.getBool()
if err != nil {
return err
}
r.ReadOnly = read
de, err := pd.getBool()
if err != nil {
return err
}
r.Default = de
sensitive, err := pd.getBool()
if err != nil {
return err
}
r.Sensitive = sensitive
return nil
}

View file

@ -0,0 +1,60 @@
package sarama
import (
"testing"
)
var (
describeConfigsResponseEmpty = []byte{
0, 0, 0, 0, // throttle
0, 0, 0, 0, // no configs
}
describeConfigsResponsePopulated = []byte{
0, 0, 0, 0, // throttle
0, 0, 0, 1, // 1 resource response
0, 0, // error code: none
0, 0, // error message: empty string
2, // resource type: topic
0, 3, 'f', 'o', 'o', // resource name: foo
0, 0, 0, 1, // 1 config entry
0, 10, 's', 'e', 'g', 'm', 'e', 'n', 't', '.', 'm', 's',
0, 4, '1', '0', '0', '0',
0, // ReadOnly
0, // Default
0, // Sensitive
}
)
func TestDescribeConfigsResponse(t *testing.T) {
var response *DescribeConfigsResponse
response = &DescribeConfigsResponse{
Resources: []*ResourceResponse{},
}
testVersionDecodable(t, "empty", response, describeConfigsResponseEmpty, 0)
if len(response.Resources) != 0 {
t.Error("Expected no groups")
}
response = &DescribeConfigsResponse{
Resources: []*ResourceResponse{
&ResourceResponse{
ErrorCode: 0,
ErrorMsg: "",
Type: TopicResource,
Name: "foo",
Configs: []*ConfigEntry{
&ConfigEntry{
Name: "segment.ms",
Value: "1000",
ReadOnly: false,
Default: false,
Sensitive: false,
},
},
},
},
}
testResponse(t, "response with error", response, describeConfigsResponsePopulated)
}

50
vendor/github.com/Shopify/sarama/end_txn_request.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
package sarama
type EndTxnRequest struct {
TransactionalID string
ProducerID int64
ProducerEpoch int16
TransactionResult bool
}
func (a *EndTxnRequest) encode(pe packetEncoder) error {
if err := pe.putString(a.TransactionalID); err != nil {
return err
}
pe.putInt64(a.ProducerID)
pe.putInt16(a.ProducerEpoch)
pe.putBool(a.TransactionResult)
return nil
}
func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
if a.TransactionalID, err = pd.getString(); err != nil {
return err
}
if a.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if a.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
if a.TransactionResult, err = pd.getBool(); err != nil {
return err
}
return nil
}
func (a *EndTxnRequest) key() int16 {
return 26
}
func (a *EndTxnRequest) version() int16 {
return 0
}
func (a *EndTxnRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,23 @@
package sarama
import "testing"
var (
endTxnRequest = []byte{
0, 3, 't', 'x', 'n', // transactional ID: "txn"
0, 0, 0, 0, 0, 0, 31, 64, // producer ID: 8000
0, 1, // producer epoch
1, // transaction result: commit
}
)
func TestEndTxnRequest(t *testing.T) {
req := &EndTxnRequest{
TransactionalID: "txn",
ProducerID: 8000,
ProducerEpoch: 1,
TransactionResult: true,
}
testRequest(t, "", req, endTxnRequest)
}

44
vendor/github.com/Shopify/sarama/end_txn_response.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package sarama
import (
"time"
)
type EndTxnResponse struct {
ThrottleTime time.Duration
Err KError
}
func (e *EndTxnResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
pe.putInt16(int16(e.Err))
return nil
}
func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
e.Err = KError(kerr)
return nil
}
func (e *EndTxnResponse) key() int16 {
return 25
}
func (e *EndTxnResponse) version() int16 {
return 0
}
func (e *EndTxnResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,22 @@
package sarama
import (
"testing"
"time"
)
var (
endTxnResponse = []byte{
0, 0, 0, 100, // throttle time: 100 ms
0, 49, // error code: ErrInvalidProducerIDMapping
}
)
func TestEndTxnResponse(t *testing.T) {
resp := &EndTxnResponse{
ThrottleTime: 100 * time.Millisecond,
Err: ErrInvalidProducerIDMapping,
}
testResponse(t, "", resp, endTxnResponse)
}

View file

@ -37,6 +37,10 @@ var ErrShuttingDown = errors.New("kafka: message received by producer in process
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
// a RecordBatch.
var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {

View file

@ -1,6 +1,8 @@
package sarama
import "time"
import (
"time"
)
type AbortedTransaction struct {
ProducerID int64
@ -31,7 +33,9 @@ type FetchResponseBlock struct {
HighWaterMarkOffset int64
LastStableOffset int64
AbortedTransactions []*AbortedTransaction
Records Records
Records *Records // deprecated: use FetchResponseBlock.Records
RecordsSet []*Records
Partial bool
}
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
@ -79,15 +83,69 @@ func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error)
if err != nil {
return err
}
if recordsSize > 0 {
if err = b.Records.decode(recordsDecoder); err != nil {
b.RecordsSet = []*Records{}
for recordsDecoder.remaining() > 0 {
records := &Records{}
if err := records.decode(recordsDecoder); err != nil {
// If at least one record set has already been decoded, this is not an error
if err == ErrInsufficientData {
if len(b.RecordsSet) == 0 {
b.Partial = true
}
break
}
return err
}
partial, err := records.isPartial()
if err != nil {
return err
}
// If we already have at least one full record set, skip the incomplete trailing one
if partial && len(b.RecordsSet) > 0 {
break
}
b.RecordsSet = append(b.RecordsSet, records)
if b.Records == nil {
b.Records = records
}
}
return nil
}
func (b *FetchResponseBlock) numRecords() (int, error) {
sum := 0
for _, records := range b.RecordsSet {
count, err := records.numRecords()
if err != nil {
return 0, err
}
sum += count
}
return sum, nil
}
func (b *FetchResponseBlock) isPartial() (bool, error) {
if b.Partial {
return true, nil
}
if len(b.RecordsSet) == 1 {
return b.RecordsSet[0].isPartial()
}
return false, nil
}
func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
@ -107,9 +165,11 @@ func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error)
}
pe.push(&lengthField{})
err = b.Records.encode(pe)
if err != nil {
return err
for _, records := range b.RecordsSet {
err = records.encode(pe)
if err != nil {
return err
}
}
return pe.pop()
}
@ -289,11 +349,11 @@ func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Enc
kb, vb := encodeKV(key, value)
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
set := frb.Records.msgSet
if set == nil {
set = &MessageSet{}
frb.Records = newLegacyRecords(set)
if len(frb.RecordsSet) == 0 {
records := newLegacyRecords(&MessageSet{})
frb.RecordsSet = []*Records{&records}
}
set := frb.RecordsSet[0].msgSet
set.Messages = append(set.Messages, msgBlock)
}
@ -301,14 +361,24 @@ func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Enco
frb := r.getOrCreateBlock(topic, partition)
kb, vb := encodeKV(key, value)
rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
batch := frb.Records.recordBatch
if batch == nil {
batch = &RecordBatch{Version: 2}
frb.Records = newDefaultRecords(batch)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].recordBatch
batch.addRecord(rec)
}
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
frb := r.getOrCreateBlock(topic, partition)
if len(frb.RecordsSet) == 0 {
records := newDefaultRecords(&RecordBatch{Version: 2})
frb.RecordsSet = []*Records{&records}
}
batch := frb.RecordsSet[0].recordBatch
batch.LastOffsetDelta = offset
}
func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
frb := r.getOrCreateBlock(topic, partition)
frb.LastStableOffset = offset

View file

@ -117,7 +117,7 @@ func TestOneMessageFetchResponse(t *testing.T) {
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@ -125,14 +125,14 @@ func TestOneMessageFetchResponse(t *testing.T) {
t.Error("Decoding detected a partial trailing message where there wasn't one.")
}
n, err := block.Records.numRecords()
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of messages.")
}
msgBlock := block.Records.msgSet.Messages[0]
msgBlock := block.RecordsSet[0].msgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}
@ -170,7 +170,7 @@ func TestOneRecordFetchResponse(t *testing.T) {
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@ -178,14 +178,14 @@ func TestOneRecordFetchResponse(t *testing.T) {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.Records.numRecords()
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
rec := block.Records.recordBatch.Records[0]
rec := block.RecordsSet[0].recordBatch.Records[0]
if !bytes.Equal(rec.Key, []byte{0x01, 0x02, 0x03, 0x04}) {
t.Error("Decoding produced incorrect record key.")
}
@ -216,7 +216,7 @@ func TestOneMessageFetchResponseV4(t *testing.T) {
if block.HighWaterMarkOffset != 0x10101010 {
t.Error("Decoding didn't produce correct high water mark offset.")
}
partial, err := block.Records.isPartial()
partial, err := block.isPartial()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
@ -224,14 +224,14 @@ func TestOneMessageFetchResponseV4(t *testing.T) {
t.Error("Decoding detected a partial trailing record where there wasn't one.")
}
n, err := block.Records.numRecords()
n, err := block.numRecords()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if n != 1 {
t.Fatal("Decoding produced incorrect number of records.")
}
msgBlock := block.Records.msgSet.Messages[0]
msgBlock := block.RecordsSet[0].msgSet.Messages[0]
if msgBlock.Offset != 0x550000 {
t.Error("Decoding produced incorrect message offset.")
}

View file

@ -0,0 +1,43 @@
package sarama
import "time"
type InitProducerIDRequest struct {
TransactionalID *string
TransactionTimeout time.Duration
}
func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
if err := pe.putNullableString(i.TransactionalID); err != nil {
return err
}
pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
return nil
}
func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
if i.TransactionalID, err = pd.getNullableString(); err != nil {
return err
}
timeout, err := pd.getInt32()
if err != nil {
return err
}
i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
return nil
}
func (i *InitProducerIDRequest) key() int16 {
return 22
}
func (i *InitProducerIDRequest) version() int16 {
return 0
}
func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,31 @@
package sarama
import (
"testing"
"time"
)
var (
initProducerIDRequestNull = []byte{
255, 255, // null transactional ID
0, 0, 0, 100, // transaction timeout: 100 ms
}
initProducerIDRequest = []byte{
0, 3, 't', 'x', 'n', // transactional ID: "txn"
0, 0, 0, 100, // transaction timeout: 100 ms
}
)
func TestInitProducerIDRequest(t *testing.T) {
req := &InitProducerIDRequest{
TransactionTimeout: 100 * time.Millisecond,
}
testRequest(t, "null transaction id", req, initProducerIDRequestNull)
transactionID := "txn"
req.TransactionalID = &transactionID
testRequest(t, "transaction id", req, initProducerIDRequest)
}

View file

@ -0,0 +1,55 @@
package sarama
import "time"
type InitProducerIDResponse struct {
ThrottleTime time.Duration
Err KError
ProducerID int64
ProducerEpoch int16
}
func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
pe.putInt16(int16(i.Err))
pe.putInt64(i.ProducerID)
pe.putInt16(i.ProducerEpoch)
return nil
}
func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
kerr, err := pd.getInt16()
if err != nil {
return err
}
i.Err = KError(kerr)
if i.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if i.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
return nil
}
func (i *InitProducerIDResponse) key() int16 {
return 22
}
func (i *InitProducerIDResponse) version() int16 {
return 0
}
func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,37 @@
package sarama
import (
"testing"
"time"
)
var (
initProducerIDResponse = []byte{
0, 0, 0, 100, // throttle time: 100 ms
0, 0, // no error
0, 0, 0, 0, 0, 0, 31, 64, // producer ID: 8000
0, 0, // producer epoch
}
initProducerIDResponseError = []byte{
0, 0, 0, 100, // throttle time: 100 ms
0, 51, // error code: ErrConcurrentTransactions
255, 255, 255, 255, 255, 255, 255, 255, // producer ID: -1
0, 0, // producer epoch
}
)
func TestInitProducerIDResponse(t *testing.T) {
resp := &InitProducerIDResponse{
ThrottleTime: 100 * time.Millisecond,
ProducerID: 8000,
ProducerEpoch: 0,
}
testResponse(t, "", resp, initProducerIDResponse)
resp.Err = ErrConcurrentTransactions
resp.ProducerID = -1
testResponse(t, "with error", resp, initProducerIDRequestError)
}

View file

@ -64,6 +64,19 @@ func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
magic, err := magicValue(pd)
if err != nil {
if err == ErrInsufficientData {
ms.PartialTrailingMessage = true
return nil
}
return err
}
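// Editorial note: a magic value greater than 1 indicates a v2 RecordBatch
// rather than a legacy message, so stop decoding here and leave the remaining
// bytes to the record-batch path.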
if magic > 1 {
return nil
}
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {

View file

@ -288,6 +288,15 @@ func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
// NewMockBrokerAddr behaves like newMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
listener, err := net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
return NewMockBrokerListener(t, brokerID, listener)
}
// NewMockBrokerListener behaves like newMockBrokerAddr but accepts connections on the listener specified.
func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
var err error
broker := &MockBroker{
@ -296,13 +305,10 @@ func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
listener: listener,
}
broker.handler = broker.defaultRequestHandler
broker.listener, err = net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {

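A hedged sketch of what this refactor enables: tests can now hand the mock broker a listener they created themselves (here an ephemeral localhost port; any net.Listener should do, and the test name is made up):

package sarama_test

import (
    "net"
    "testing"

    "github.com/Shopify/sarama"
)

func TestWithCustomListener(t *testing.T) {
    ln, err := net.Listen("tcp", "127.0.0.1:0") // the test controls the listener
    if err != nil {
        t.Fatal(err)
    }
    broker := sarama.NewMockBrokerListener(t, 1, ln) // the broker takes ownership
    defer broker.Close()
}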
View file

@ -384,14 +384,20 @@ func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int3
// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
errors map[string]map[int32]KError
t TestReporter
version int16
errors map[string]map[int32]KError
t TestReporter
}
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
return &MockProduceResponse{t: t}
}
func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
mr.version = version
return mr
}
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
@ -407,7 +413,9 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
res := &ProduceResponse{
Version: mr.version,
}
for topic, partitions := range req.records {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))

View file

@ -48,9 +48,9 @@ var (
0x00, 0x00, 0x00, 0x46,
0x00, 0x00, 0x00, 0x00,
0x02,
0x54, 0x79, 0x61, 0xFD,
0xCA, 0x33, 0xBC, 0x05,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x01, 0x58, 0x8D, 0xCD, 0x59, 0x38,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@ -83,9 +83,10 @@ func TestProduceRequest(t *testing.T) {
request.Version = 3
batch := &RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 1,
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{0x01, 0x02, 0x03, 0x04},

View file

@ -122,7 +122,15 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
if req.Version >= 3 {
req.AddBatch(topic, partition, set.recordsToSend.recordBatch)
rb := set.recordsToSend.recordBatch
if len(rb.Records) > 0 {
rb.LastOffsetDelta = int32(len(rb.Records) - 1)
for i, record := range rb.Records {
record.OffsetDelta = int64(i)
}
}
req.AddBatch(topic, partition, rb)
continue
}
if ps.parent.conf.Producer.Compression == CompressionNone {
@ -132,6 +140,17 @@ func (ps *produceSet) buildRequest() *ProduceRequest {
// and sent as the payload of a single fake "message" with the appropriate codec
// set and no key. When the server sees a message with a compression codec, it
// decompresses the payload and treats the result as its message set.
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
// If our version is 0.10 or later, assign relative offsets
// to the inner messages. This lets the broker avoid
// recompressing the message set.
// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
// for details on relative offsets.)
for i, msg := range set.recordsToSend.msgSet.Messages {
msg.Offset = int64(i)
}
}
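// Editorial illustration (not part of the upstream change): with three
// messages in a compressed set, the inner offsets become 0, 1, 2. The broker
// assigns absolute offsets once for the compressed wrapper and consumers
// reconstruct each inner message's offset from these relative values, so the
// broker never needs to decompress and rewrite the set.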
payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.

View file

@ -173,11 +173,14 @@ func TestProduceSetCompressedRequestBuilding(t *testing.T) {
if err != nil {
t.Error("Failed to decode set from payload")
}
for _, compMsgBlock := range msg.Set.Messages {
for i, compMsgBlock := range msg.Set.Messages {
compMsg := compMsgBlock.Msg
if compMsg.Version != 1 {
t.Error("Wrong compressed message version")
}
if compMsgBlock.Offset != int64(i) {
t.Errorf("Wrong relative inner offset, expected %d, got %d", i, compMsgBlock.Offset)
}
}
if msg.Version != 1 {
t.Error("Wrong compressed parent message version")
@ -234,6 +237,10 @@ func TestProduceSetV3RequestBuilding(t *testing.T) {
t.Errorf("Wrong timestamp delta: %v", rec.TimestampDelta)
}
if rec.OffsetDelta != int64(i) {
t.Errorf("Wrong relative inner offset, expected %d, got %d", i, rec.OffsetDelta)
}
for j, h := range batch.Records[i].Headers {
exp := fmt.Sprintf("header-%d", j+1)
if string(h.Key) != exp {

View file

@ -208,21 +208,15 @@ func (b *RecordBatch) decode(pd packetDecoder) (err error) {
func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
var raw []byte
if b.Codec != CompressionNone {
var err error
if raw, err = encode(recordsArray(b.Records), nil); err != nil {
return err
}
b.recordsLen = len(raw)
var err error
if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
return err
}
b.recordsLen = len(raw)
switch b.Codec {
case CompressionNone:
offset := pe.offset()
if err := recordsArray(b.Records).encode(pe); err != nil {
return err
}
b.recordsLen = pe.offset() - offset
b.compressedRecords = raw
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)

View file

@ -69,9 +69,10 @@ var recordBatchTestCases = []struct {
{
name: "uncompressed record",
batch: RecordBatch{
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Version: 2,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
@ -115,10 +116,11 @@ var recordBatchTestCases = []struct {
{
name: "gzipped record",
batch: RecordBatch{
Version: 2,
Codec: CompressionGZIP,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Version: 2,
Codec: CompressionGZIP,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
@ -168,10 +170,11 @@ var recordBatchTestCases = []struct {
{
name: "snappy compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionSnappy,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Version: 2,
Codec: CompressionSnappy,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},
@ -203,10 +206,11 @@ var recordBatchTestCases = []struct {
{
name: "lz4 compressed record",
batch: RecordBatch{
Version: 2,
Codec: CompressionLZ4,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
Version: 2,
Codec: CompressionLZ4,
FirstTimestamp: time.Unix(1479847795, 0),
MaxTimestamp: time.Unix(0, 0),
LastOffsetDelta: 0,
Records: []*Record{{
TimestampDelta: 5 * time.Millisecond,
Key: []byte{1, 2, 3, 4},

View file

@ -62,16 +62,12 @@ func (r *Records) encode(pe packetEncoder) error {
}
return r.recordBatch.encode(pe)
}
return fmt.Errorf("unknown records type: %v", r.recordsType)
}
func (r *Records) setTypeFromMagic(pd packetDecoder) error {
dec, err := pd.peek(magicOffset, magicLength)
if err != nil {
return err
}
magic, err := dec.getInt8()
magic, err := magicValue(pd)
if err != nil {
return err
}
@ -80,13 +76,14 @@ func (r *Records) setTypeFromMagic(pd packetDecoder) error {
if magic < 2 {
r.recordsType = legacyRecords
}
return nil
}
func (r *Records) decode(pd packetDecoder) error {
if r.recordsType == unknownRecords {
if err := r.setTypeFromMagic(pd); err != nil {
return nil
return err
}
}
@ -165,3 +162,12 @@ func (r *Records) isControl() (bool, error) {
}
return false, fmt.Errorf("unknown records type: %v", r.recordsType)
}
func magicValue(pd packetDecoder) (int8, error) {
dec, err := pd.peek(magicOffset, magicLength)
if err != nil {
return 0, err
}
return dec.getInt8()
}

View file

@ -114,6 +114,30 @@ func allocateBody(key, version int16) protocolBody {
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
case 19:
return &CreateTopicsRequest{}
case 20:
return &DeleteTopicsRequest{}
case 22:
return &InitProducerIDRequest{}
case 24:
return &AddPartitionsToTxnRequest{}
case 25:
return &AddOffsetsToTxnRequest{}
case 26:
return &EndTxnRequest{}
case 28:
return &TxnOffsetCommitRequest{}
case 29:
return &DescribeAclsRequest{}
case 30:
return &CreateAclsRequest{}
case 31:
return &DeleteAclsRequest{}
case 32:
return &DescribeConfigsRequest{}
case 33:
return &AlterConfigsRequest{}
case 37:
return &CreatePartitionsRequest{}
}

View file

@ -96,3 +96,5 @@ func testResponse(t *testing.T, name string, res protocolBody, expected []byte)
t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded)
}
}
func nullString(s string) *string { return &s }

View file

@ -0,0 +1,126 @@
package sarama
type TxnOffsetCommitRequest struct {
TransactionalID string
GroupID string
ProducerID int64
ProducerEpoch int16
Topics map[string][]*PartitionOffsetMetadata
}
func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
if err := pe.putString(t.TransactionalID); err != nil {
return err
}
if err := pe.putString(t.GroupID); err != nil {
return err
}
pe.putInt64(t.ProducerID)
pe.putInt16(t.ProducerEpoch)
if err := pe.putArrayLength(len(t.Topics)); err != nil {
return err
}
for topic, partitions := range t.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for _, partition := range partitions {
if err := partition.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
if t.TransactionalID, err = pd.getString(); err != nil {
return err
}
if t.GroupID, err = pd.getString(); err != nil {
return err
}
if t.ProducerID, err = pd.getInt64(); err != nil {
return err
}
if t.ProducerEpoch, err = pd.getInt16(); err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics = make(map[string][]*PartitionOffsetMetadata)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
for j := 0; j < m; j++ {
partitionOffsetMetadata := new(PartitionOffsetMetadata)
if err := partitionOffsetMetadata.decode(pd, version); err != nil {
return err
}
t.Topics[topic][j] = partitionOffsetMetadata
}
}
return nil
}
func (a *TxnOffsetCommitRequest) key() int16 {
return 28
}
func (a *TxnOffsetCommitRequest) version() int16 {
return 0
}
func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
return V0_11_0_0
}
type PartitionOffsetMetadata struct {
Partition int32
Offset int64
Metadata *string
}
func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
pe.putInt32(p.Partition)
pe.putInt64(p.Offset)
if err := pe.putNullableString(p.Metadata); err != nil {
return err
}
return nil
}
func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
if p.Partition, err = pd.getInt32(); err != nil {
return err
}
if p.Offset, err = pd.getInt64(); err != nil {
return err
}
if p.Metadata, err = pd.getNullableString(); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,35 @@
package sarama
import "testing"
var (
txnOffsetCommitRequest = []byte{
0, 3, 't', 'x', 'n',
0, 7, 'g', 'r', 'o', 'u', 'p', 'i', 'd',
0, 0, 0, 0, 0, 0, 31, 64, // producer ID
0, 1, // producer epoch
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition
0, 0, 0, 2, // partition no 2
0, 0, 0, 0, 0, 0, 0, 123, // offset: 123
255, 255, // no metadata (null string)
}
)
func TestTxnOffsetCommitRequest(t *testing.T) {
req := &TxnOffsetCommitRequest{
TransactionalID: "txn",
GroupID: "groupid",
ProducerID: 8000,
ProducerEpoch: 1,
Topics: map[string][]*PartitionOffsetMetadata{
"topic": []*PartitionOffsetMetadata{{
Offset: 123,
Partition: 2,
}},
},
}
testRequest(t, "", req, txnOffsetCommitRequest)
}

View file

@ -0,0 +1,83 @@
package sarama
import (
"time"
)
type TxnOffsetCommitResponse struct {
ThrottleTime time.Duration
Topics map[string][]*PartitionError
}
func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
if err := pe.putArrayLength(len(t.Topics)); err != nil {
return err
}
for topic, e := range t.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(e)); err != nil {
return err
}
for _, partitionError := range e {
if err := partitionError.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
throttleTime, err := pd.getInt32()
if err != nil {
return err
}
t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
n, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics = make(map[string][]*PartitionError)
for i := 0; i < n; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
m, err := pd.getArrayLength()
if err != nil {
return err
}
t.Topics[topic] = make([]*PartitionError, m)
for j := 0; j < m; j++ {
t.Topics[topic][j] = new(PartitionError)
if err := t.Topics[topic][j].decode(pd, version); err != nil {
return err
}
}
}
return nil
}
func (a *TxnOffsetCommitResponse) key() int16 {
return 28
}
func (a *TxnOffsetCommitResponse) version() int16 {
return 0
}
func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
return V0_11_0_0
}

View file

@ -0,0 +1,31 @@
package sarama
import (
"testing"
"time"
)
var (
txnOffsetCommitResponse = []byte{
0, 0, 0, 100,
0, 0, 0, 1, // 1 topic
0, 5, 't', 'o', 'p', 'i', 'c',
0, 0, 0, 1, // 1 partition response
0, 0, 0, 2, // partition number 2
0, 47, // error code: ErrInvalidProducerEpoch
}
)
func TestTxnOffsetCommitResponse(t *testing.T) {
resp := &TxnOffsetCommitResponse{
ThrottleTime: 100 * time.Millisecond,
Topics: map[string][]*PartitionError{
"topic": []*PartitionError{{
Partition: 2,
Err: ErrInvalidProducerEpoch,
}},
},
}
testResponse(t, "", resp, txnOffsetCommitResponse)
}
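Taken together, the new transactional message types sketch out the client-side flow. A hedged outline using only the structs from this diff; the broker round-trips are assumed and omitted, and the IDs and offsets are placeholders:

package main

import (
    "fmt"
    "time"

    "github.com/Shopify/sarama"
)

func main() {
    // 1. Obtain a producer ID/epoch for the transactional session.
    initReq := &sarama.InitProducerIDRequest{
        TransactionalID:    strPtr("txn"),
        TransactionTimeout: time.Minute,
    }

    // 2. ...produce within the transaction, then attach consumed offsets to it.
    commitReq := &sarama.TxnOffsetCommitRequest{
        TransactionalID: "txn",
        GroupID:         "groupid",
        ProducerID:      8000, // would come from the InitProducerID response
        ProducerEpoch:   1,
        Topics: map[string][]*sarama.PartitionOffsetMetadata{
            "topic": {{Partition: 2, Offset: 123}},
        },
    }

    // 3. Commit (true) or abort (false) the transaction.
    endReq := &sarama.EndTxnRequest{
        TransactionalID:   "txn",
        ProducerID:        8000,
        ProducerEpoch:     1,
        TransactionResult: true,
    }
    fmt.Println(initReq, commitReq, endReq)
}

func strPtr(s string) *string { return &s }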

View file

@ -1,3 +1,248 @@
Release v1.13.4 (2018-02-23)
===
### Service Client Updates
* `service/appstream`: Updates service API and documentation
* This API update is to enable customers to copy their Amazon AppStream 2.0 images within and between AWS Regions
* `aws/endpoints`: Updated Regions and Endpoints metadata.
Release v1.13.3 (2018-02-22)
===
### Service Client Updates
* `service/ce`: Updates service API and documentation
* `service/elasticloadbalancingv2`: Updates service documentation
* `aws/endpoints`: Updated Regions and Endpoints metadata.
Release v1.13.2 (2018-02-21)
===
### Service Client Updates
* `service/codecommit`: Updates service API and documentation
* This release adds an API for adding a file directly to an AWS CodeCommit repository without requiring a Git client.
* `service/ec2`: Updates service API and documentation
* Adds support for tagging an EBS snapshot as part of the API call that creates the EBS snapshot
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/serverlessrepo`: Updates service API, documentation, and paginators
Release v1.13.1 (2018-02-20)
===
### Service Client Updates
* `service/autoscaling`: Updates service API and documentation
* Amazon EC2 Auto Scaling support for service-linked roles
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/waf`: Updates service API and documentation
* The new PermissionPolicy APIs in AWS WAF Regional allow customers to attach resource-based policies to their entities.
* `service/waf-regional`: Updates service API and documentation
Release v1.13.0 (2018-02-19)
===
### Service Client Updates
* `service/config`: Updates service API
* With this release, AWS Config updated the ConfigurationItemStatus enum values. The values prior to this update did not represent appropriate values returned by GetResourceConfigHistory. You must update your code to enumerate the new enum values so this is a breaking change. To map old properties to new properties, use the following descriptions: New discovered resource - Old property: Discovered, New property: ResourceDiscovered. Updated resource - Old property: Ok, New property: OK. Deleted resource - Old property: Deleted, New property: ResourceDeleted or ResourceDeletedNotRecorded. Not-recorded resource - Old property: N/A, New property: ResourceNotRecorded or ResourceDeletedNotRecorded.
Release v1.12.79 (2018-02-16)
===
### Service Client Updates
* `service/rds`: Updates service API and documentation
* Updates RDS API to indicate whether a DBEngine supports read replicas.
Release v1.12.78 (2018-02-15)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/gamelift`: Updates service API and documentation
* Updates to allow Fleets to run on On-Demand or Spot instances.
* `service/mediaconvert`: Updates service API and documentation
* Nielsen ID3 tags can now be inserted into transport stream (TS) and HLS outputs. For more information on Nielsen configuration you can go to https://docs.aws.amazon.com/mediaconvert/latest/apireference/jobs.html#jobs-nielsenconfiguration
Release v1.12.77 (2018-02-14)
===
### Service Client Updates
* `service/appsync`: Updates service API and documentation
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/lex-models`: Updates service API and documentation
Release v1.12.76 (2018-02-13)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/glacier`: Updates service documentation
* Documentation updates for glacier
* `service/route53`: Updates service API
* Added support for creating Private Hosted Zones and metric-based healthchecks in the ap-northeast-3 region for whitelisted customers.
Release v1.12.75 (2018-02-12)
===
### Service Client Updates
* `service/cognito-idp`: Updates service API and documentation
* `service/ec2`: Updates service API and documentation
* Network interfaces now supply the following additional status of "associated" to better distinguish the current status.
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/guardduty`: Updates service API and documentation
* Added PortProbeAction information to the Action section of the port probe-type finding.
* `service/kms`: Updates service API
* This release of AWS Key Management Service includes support for InvalidArnException in the RetireGrant API.
* `service/rds`: Updates service documentation
* Aurora MySQL now supports MySQL 5.7.
Release v1.12.74 (2018-02-09)
===
### Service Client Updates
* `service/ec2`: Updates service API and documentation
* Users can now better understand the longer ID opt-in status of their account using the two new APIs DescribeAggregateIdFormat and DescribePrincipalIdFormat
* `service/lex-models`: Updates service API and documentation
* `service/runtime.lex`: Updates service API and documentation
Release v1.12.73 (2018-02-08)
===
### Service Client Updates
* `service/appstream`: Updates service API and documentation
* Adds support for allowing customers to provide a redirect URL for a stack. Users will be redirected to the link provided by the admin at the end of their streaming session.
* `service/budgets`: Updates service API and documentation
* Making budgetLimit and timePeriod optional, and updating budgets docs.
* `service/dms`: Updates service API, documentation, and paginators
* This release includes the addition of two new APIs: describe replication instance task logs and reboot instance. The first allows users to see how much storage each log for a task on a given instance is occupying. The second gives users the option to reboot the application software on the instance and force a failover for Multi-AZ instances to test robustness of their integration with our service.
* `service/ds`: Updates service API
* Updated the regex of some input parameters to support longer EC2 identifiers.
* `service/dynamodb`: Updates service API and documentation
* Amazon DynamoDB now supports server-side encryption using a default service key (alias/aws/dynamodb) from the AWS Key Management Service (KMS). AWS KMS is a service that combines secure, highly available hardware and software to provide a key management system scaled for the cloud. AWS KMS is used via the AWS Management Console or APIs to centrally create encryption keys, define the policies that control how keys can be used, and audit key usage to prove they are being used correctly. For more information, see the Amazon DynamoDB Developer Guide.
* `service/gamelift`: Updates service API and documentation
* Amazon GameLift FlexMatch added the StartMatchBackfill API. This API allows developers to add new players to an existing game session using the same matchmaking rules and player data that were used to initially create the session.
* `service/medialive`: Updates service API and documentation
* AWS Elemental MediaLive has added support for updating channel settings for idle channels. You can now update channel name, channel outputs and output destinations, encoder settings, user role ARN, and input specifications. Channel settings can be updated in the console or with API calls. Please note that running channels need to be stopped before they can be updated. We've also deprecated the 'Reserved' field.
* `service/mediastore`: Updates service API and documentation
* AWS Elemental MediaStore now supports per-container CORS configuration.
Release v1.12.72 (2018-02-07)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/glue`: Updates service API and documentation
* This new feature will now allow customers to add a customized json classifier. They can specify a json path to indicate the object, array or field of the json documents they'd like crawlers to inspect when they crawl json files.
* `service/servicecatalog`: Updates service API, documentation, and paginators
* This release of Service Catalog adds SearchProvisionedProducts API and ProvisionedProductPlan APIs.
* `service/servicediscovery`: Updates service API and documentation
* This release adds support for registering CNAME record types and creating Route 53 alias records that route traffic to Amazon Elastic Load Balancers using Amazon Route 53 Auto Naming APIs.
* `service/ssm`: Updates service API and documentation
* This Patch Manager release supports configuring Linux repos as part of patch baselines, controlling updates of non-OS security packages and also creating patch baselines for SUSE12
### SDK Enhancements
* `private/model/api`: Add validation to ensure there is no duplication of services in models/apis ([#1758](https://github.com/aws/aws-sdk-go/pull/1758))
* Prevents the SDK from mistakenly generating code for a single service multiple times with different model versions.
* `example/service/ec2/instancesbyRegion`: Fix typos in example ([#1762](https://github.com/aws/aws-sdk-go/pull/1762))
* `private/model/api`: removing SDK API reference crosslinks from input/output shapes. (#1765)
### SDK Bugs
* `aws/session`: Fix bug in session.New not supporting AWS_SDK_LOAD_CONFIG ([#1770](https://github.com/aws/aws-sdk-go/pull/1770))
* Fixes a bug in the session.New function that was not correctly sourcing the shared configuration files' path.
* Fixes [#1771](https://github.com/aws/aws-sdk-go/pull/1771)
Release v1.12.71 (2018-02-05)
===
### Service Client Updates
* `service/acm`: Updates service documentation
* Documentation updates for acm
* `service/cloud9`: Updates service documentation and examples
* API usage examples for AWS Cloud9.
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/kinesis`: Updates service API and documentation
* Using ListShards a Kinesis Data Streams customer or client can get information about shards in a data stream (including meta-data for each shard) without obtaining data stream level information.
* `service/opsworks`: Updates service API, documentation, and waiters
* AWS OpsWorks Stacks supports EBS encryption and HDD volume types. Also, a new DescribeOperatingSystems API is available, which lists all operating systems supported by OpsWorks Stacks.
Release v1.12.70 (2018-01-26)
===
### Service Client Updates
* `service/devicefarm`: Updates service API and documentation
* Add InteractionMode in CreateRemoteAccessSession for DirectDeviceAccess feature.
* `service/medialive`: Updates service API and documentation
* Add InputSpecification to CreateChannel (specification of input attributes is used for channel sizing and affects pricing); add NotFoundException to DeleteInputSecurityGroups.
* `service/mturk-requester`: Updates service documentation
Release v1.12.69 (2018-01-26)
===
### SDK Bugs
* `models/api`: Fix colliding names [#1754](https://github.com/aws/aws-sdk-go/pull/1754) [#1756](https://github.com/aws/aws-sdk-go/pull/1756)
* SDK had duplicate folders that were causing errors in some builds.
* Fixes [#1753](https://github.com/aws/aws-sdk-go/issues/1753)
Release v1.12.68 (2018-01-25)
===
### Service Client Updates
* `service/alexaforbusiness`: Updates service API and documentation
* `service/codebuild`: Updates service API and documentation
* Adding support for Shallow Clone and GitHub Enterprise in AWS CodeBuild.
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/guardduty`: Adds new service
* Added the missing AccessKeyDetails object to the resource shape.
* `service/lambda`: Updates service API and documentation
* AWS Lambda now supports Revision ID on your function versions and aliases, to track and apply conditional updates when you are updating your function version or alias resources.
### SDK Bugs
* `service/s3/s3manager`: Fix check for nil OrigErr in Error() [#1749](https://github.com/aws/aws-sdk-go/issues/1749)
* S3 Manager's `Error` type did not check whether `OrigErr` was nil before calling `Error()` on it; a sketch of the guard follows this list.
* Fixes [#1748](https://github.com/aws/aws-sdk-go/issues/1748)
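A minimal illustration of the guard this fix adds; the type below is hypothetical, not the SDK's `s3manager` error type.

```go
package main

import "fmt"

// multiErr is a hypothetical error type illustrating the nil-OrigErr guard.
type multiErr struct {
	msg     string
	origErr error
}

func (e *multiErr) Error() string {
	if e.origErr != nil {
		return e.msg + ": " + e.origErr.Error()
	}
	// Without this guard, calling Error() on a nil origErr would panic.
	return e.msg
}

func main() {
	err := &multiErr{msg: "upload failed"} // origErr intentionally nil
	fmt.Println(err.Error())               // safe: "upload failed"
}
```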
Release v1.12.67 (2018-01-22)
===
### Service Client Updates
* `service/budgets`: Updates service API and documentation
* Add additional costTypes (IncludeDiscount, UseAmortized) to support finer control over the charges included in a cost budget.
Release v1.12.66 (2018-01-19)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/glue`: Updates service API and documentation
* New AWS Glue DataCatalog APIs to manage table versions, and a new feature to skip archiving of the old table version when updating a table.
* `service/transcribe`: Adds new service
Release v1.12.65 (2018-01-18)
===
### Service Client Updates
* `service/sagemaker`: Updates service API and documentation
* CreateTrainingJob and CreateEndpointConfig now support a KMS key for volume encryption.
Release v1.12.64 (2018-01-17)
===
### Service Client Updates
* `service/autoscaling-plans`: Updates service documentation
* `service/ec2`: Updates service documentation
* Documentation updates for EC2
Release v1.12.63 (2018-01-17)
===
### Service Client Updates
* `service/application-autoscaling`: Updates service API and documentation
* `service/autoscaling-plans`: Adds new service
* `service/rds`: Updates service API and documentation
* With this release you can now integrate RDS DB instances with CloudWatch Logs. We have added parameters to the operations for creating and modifying DB instances (for example, CreateDBInstance) to allow you to take advantage of this capability through the CLI and API. Once you enable this feature, a stream of log events will be published to CloudWatch Logs for each log type you enable.
Release v1.12.62 (2018-01-15)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/lambda`: Updates service API and documentation
* Support for creating Lambda functions using the 'dotnetcore2.0' and 'go1.x' runtimes; a hedged sketch follows.
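A hedged sketch of creating a Go function with the new `go1.x` runtime; the role ARN, bucket, and key below are placeholders.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	out, err := svc.CreateFunction(&lambda.CreateFunctionInput{
		FunctionName: aws.String("example-func"),
		Runtime:      aws.String("go1.x"), // newly supported runtime
		Handler:      aws.String("main"),
		Role:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		Code: &lambda.FunctionCode{
			S3Bucket: aws.String("example-bucket"),
			S3Key:    aws.String("example.zip"),
		},
	})
	if err != nil {
		fmt.Println("CreateFunction error:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.FunctionArn))
}
```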
Release v1.12.61 (2018-01-12)
===
View file
@ -1,12 +1,11 @@
package client
import (
"math/rand"
"strconv"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
@ -31,8 +30,6 @@ func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
@ -53,7 +50,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
retryCount = 13
}
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
}
@ -117,22 +114,3 @@ func canUseRetryAfterHeader(r *request.Request) bool {
return true
}
// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}
func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
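For context, a standalone sketch of the backoff formula in the hunk above; with this change the SDK draws its jitter from the shared `sdkrand.SeededRand` instead of the per-file `lockedSource` removed here. The `minTime` value below is illustrative.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff computes exponential backoff with jitter:
// 2^retryCount * (rand[0,minTime) + minTime) milliseconds.
func backoff(retryCount, minTime int) time.Duration {
	delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < 4; i++ {
		fmt.Println(i, backoff(i, 30))
	}
}
```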
View file
@ -46,6 +46,7 @@ func (reader *teeReaderCloser) Close() error {
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
@ -53,6 +54,9 @@ func logRequest(r *request.Request) {
}
if logBody {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
View file
@ -2,8 +2,17 @@ package client
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
type mockCloser struct {
@ -55,3 +64,81 @@ func TestLogWriter(t *testing.T) {
t.Errorf("Expected %q, but received %q", expected, lw.buf.String())
}
}
func TestLogRequest(t *testing.T) {
cases := []struct {
Body io.ReadSeeker
ExpectBody []byte
LogLevel aws.LogLevelType
}{
{
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("body content"))),
ExpectBody: []byte("body content"),
},
{
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("body content"))),
LogLevel: aws.LogDebugWithHTTPBody,
ExpectBody: []byte("body content"),
},
{
Body: bytes.NewReader([]byte("body content")),
ExpectBody: []byte("body content"),
},
{
Body: bytes.NewReader([]byte("body content")),
LogLevel: aws.LogDebugWithHTTPBody,
ExpectBody: []byte("body content"),
},
}
for i, c := range cases {
logW := bytes.NewBuffer(nil)
req := request.New(
aws.Config{
Credentials: credentials.AnonymousCredentials,
Logger: &bufLogger{w: logW},
LogLevel: aws.LogLevel(c.LogLevel),
},
metadata.ClientInfo{
Endpoint: "https://mock-service.mock-region.amazonaws.com",
},
testHandlers(),
nil,
&request.Operation{
Name: "APIName",
HTTPMethod: "POST",
HTTPPath: "/",
},
struct{}{}, nil,
)
req.SetReaderBody(c.Body)
req.Build()
logRequest(req)
b, err := ioutil.ReadAll(req.HTTPRequest.Body)
if err != nil {
t.Fatalf("%d, expect to read SDK request Body", i)
}
if e, a := c.ExpectBody, b; !reflect.DeepEqual(e, a) {
t.Errorf("%d, expect %v body, got %v", i, e, a)
}
}
}
type bufLogger struct {
w *bytes.Buffer
}
func (l *bufLogger) Log(args ...interface{}) {
fmt.Fprintln(l.w, args...)
}
func testHandlers() request.Handlers {
var handlers request.Handlers
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
return handlers
}
View file
@ -3,7 +3,6 @@ package corehandlers
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
@ -36,18 +35,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
if r.Body != nil {
var err error
length, err = aws.SeekerLen(r.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
return
}
}
}
View file
@ -1,5 +1,10 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
//
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable
// to true (case insensitive) instructs the SDK to disable the EC2 Metadata
// client. The client cannot be used while the variable is set to true.
package ec2metadata
import (
@ -7,17 +12,21 @@ import (
"errors"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/request"
)
// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)
// Disable the EC2 Metadata service if the environment variable is set.
// This short-circuits the service's functionality so that requests always
// fail to send.
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
svc.Handlers.Send.SwapNamed(request.NamedHandler{
Name: corehandlers.SendHandler.Name,
Fn: func(r *request.Request) {
r.Error = awserr.New(
request.CanceledErrorCode,
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
nil)
},
})
}
// Add additional options to the service config
for _, option := range opts {
option(svc.Client)
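A hedged usage sketch of the new kill switch added above: with the environment variable set, the swapped send handler makes every metadata call fail fast.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	svc := ec2metadata.New(session.Must(session.NewSession()))
	if _, err := svc.Region(); err != nil {
		// Expected: "EC2 IMDS access disabled via AWS_EC2_METADATA_DISABLED env var"
		fmt.Println("metadata disabled:", err)
	}
}
```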
View file
@ -3,21 +3,30 @@ package ec2metadata_test
import (
"net/http"
"net/http/httptest"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/stretchr/testify/assert"
)
func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) {
svc := ec2metadata.New(unit.Session)
assert.NotEqual(t, http.DefaultClient, svc.Config.HTTPClient)
assert.Equal(t, 5*time.Second, svc.Config.HTTPClient.Timeout)
if e, a := http.DefaultClient, svc.Config.HTTPClient; e == a {
t.Errorf("expect %v, not to equal %v", e, a)
}
if e, a := 5*time.Second, svc.Config.HTTPClient.Timeout; e != a {
t.Errorf("expect %v to be %v", e, a)
}
}
func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) {
@ -28,18 +37,25 @@ func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) {
svc := ec2metadata.New(unit.Session)
assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
if e, a := http.DefaultClient, svc.Config.HTTPClient; e != a {
t.Errorf("expect %v, got %v", e, a)
}
tr, ok := svc.Config.HTTPClient.Transport.(*http.Transport)
assert.True(t, ok)
assert.NotNil(t, tr)
assert.Nil(t, tr.Dial)
tr := svc.Config.HTTPClient.Transport.(*http.Transport)
if tr == nil {
t.Fatalf("expect transport not to be nil")
}
if tr.Dial != nil {
t.Errorf("expect dial to be nil, was not")
}
}
func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) {
svc := ec2metadata.New(unit.Session, aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
assert.Equal(t, http.DefaultClient, svc.Config.HTTPClient)
if e, a := http.DefaultClient, svc.Config.HTTPClient; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) {
@ -63,6 +79,30 @@ func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) {
runEC2MetadataClients(t, cfg, 100)
}
func TestClientDisableIMDS(t *testing.T) {
env := awstesting.StashEnv()
defer awstesting.PopEnv(env)
os.Setenv("AWS_EC2_METADATA_DISABLED", "true")
svc := ec2metadata.New(unit.Session)
resp, err := svc.Region()
if err == nil {
t.Fatalf("expect error, got none")
}
if len(resp) != 0 {
t.Errorf("expect no response, got %v", resp)
}
aerr := err.(awserr.Error)
if e, a := request.CanceledErrorCode, aerr.Code(); e != a {
t.Errorf("expect %v error code, got %v", e, a)
}
if e, a := "AWS_EC2_METADATA_DISABLED", aerr.Message(); !strings.Contains(a, e) {
t.Errorf("expect %v in error message, got %v", e, a)
}
}
func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) {
var wg sync.WaitGroup
wg.Add(atOnce)
@ -70,7 +110,9 @@ func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) {
go func() {
svc := ec2metadata.New(unit.Session, cfg)
_, err := svc.Region()
assert.NoError(t, err)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
wg.Done()
}()
}
View file
@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
)
@ -84,11 +85,34 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
custAddEC2Metadata(p)
custAddS3DualStack(p)
custRmIotDataService(p)
custFixCloudHSMv2SigningName(p)
}
return ps, nil
}
func custFixCloudHSMv2SigningName(p *partition) {
// Workaround for aws/aws-sdk-go#1745 until the endpoint model can be
// fixed upstream. TODO remove this once the endpoints model is updated.
s, ok := p.Services["cloudhsmv2"]
if !ok {
return
}
if len(s.Defaults.CredentialScope.Service) != 0 {
fmt.Fprintf(os.Stderr, "cloudhsmv2 signing name already set, ignoring override.\n")
// If the value is already set don't override
return
}
s.Defaults.CredentialScope.Service = "cloudhsm"
fmt.Fprintf(os.Stderr, "cloudhsmv2 signing name not set, overriding.\n")
p.Services["cloudhsmv2"] = s
}
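Illustrative only (not part of this file): after the workaround above, resolving a cloudhsmv2 endpoint should report the corrected signing name.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("cloudhsmv2", "us-east-1")
	if err != nil {
		fmt.Println("resolve error:", err)
		return
	}
	fmt.Println(ep.SigningName) // "cloudhsm"
}
```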
func custAddS3DualStack(p *partition) {
if p.ID != "aws" {
return
View file
@ -115,3 +115,126 @@ func TestDecodeModelOptionsSet(t *testing.T) {
t.Errorf("expect %v options got %v", expect, actual)
}
}
func TestDecode_CustFixCloudHSMv2SigningName(t *testing.T) {
cases := []struct {
Doc string
Expect string
}{
{
Doc: `
{
"version": 3,
"partitions": [
{
"defaults": {
"hostname": "{service}.{region}.{dnsSuffix}",
"protocols": [
"https"
],
"signatureVersions": [
"v4"
]
},
"dnsSuffix": "amazonaws.com",
"partition": "aws",
"partitionName": "AWS Standard",
"regionRegex": "^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$",
"regions": {
"ap-northeast-1": {
"description": "Asia Pacific (Tokyo)"
},
"us-east-1": {
"description": "US East (N. Virginia)"
}
},
"services": {
"cloudhsmv2": {
"endpoints": {
"us-east-1": {}
}
},
"s3": {
"endpoints": {
"ap-northeast-1": {}
}
}
}
}
]
}`,
Expect: "cloudhsm",
},
{
Doc: `
{
"version": 3,
"partitions": [
{
"defaults": {
"hostname": "{service}.{region}.{dnsSuffix}",
"protocols": [
"https"
],
"signatureVersions": [
"v4"
]
},
"dnsSuffix": "amazonaws.com",
"partition": "aws",
"partitionName": "AWS Standard",
"regionRegex": "^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$",
"regions": {
"ap-northeast-1": {
"description": "Asia Pacific (Tokyo)"
},
"us-east-1": {
"description": "US East (N. Virginia)"
}
},
"services": {
"cloudhsmv2": {
"defaults": {
"credentialScope": {
"service": "coolSigningName"
}
},
"endpoints": {
"us-east-1": {}
}
},
"s3": {
"endpoints": {
"ap-northeast-1": {}
}
}
}
}
]
}`,
Expect: "coolSigningName",
},
}
for i, c := range cases {
resolver, err := DecodeModel(strings.NewReader(c.Doc))
if err != nil {
t.Fatalf("%d, expected no error, got %v", i, err)
}
p := resolver.(partitions)[0]
defaults := p.Services["cloudhsmv2"].Defaults
if e, a := c.Expect, defaults.CredentialScope.Service; e != a {
t.Errorf("%d, expect %v, got %v", i, e, a)
}
endpoint, err := resolver.EndpointFor("cloudhsmv2", "us-east-1")
if err != nil {
t.Fatalf("%d, failed to resolve endpoint, %v", i, err)
}
if e, a := c.Expect, endpoint.SigningName; e != a {
t.Errorf("%d, expected %q go %q", i, e, a)
}
}
}
View file
@ -52,6 +52,7 @@ const (
Appstream2ServiceID = "appstream2" // Appstream2.
AthenaServiceID = "athena" // Athena.
AutoscalingServiceID = "autoscaling" // Autoscaling.
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
BatchServiceID = "batch" // Batch.
BudgetsServiceID = "budgets" // Budgets.
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
@ -105,12 +106,16 @@ const (
IotServiceID = "iot" // Iot.
KinesisServiceID = "kinesis" // Kinesis.
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
KmsServiceID = "kms" // Kms.
LambdaServiceID = "lambda" // Lambda.
LightsailServiceID = "lightsail" // Lightsail.
LogsServiceID = "logs" // Logs.
MachinelearningServiceID = "machinelearning" // Machinelearning.
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
MedialiveServiceID = "medialive" // Medialive.
MediapackageServiceID = "mediapackage" // Mediapackage.
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MghServiceID = "mgh" // Mgh.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
@ -131,6 +136,7 @@ const (
S3ServiceID = "s3" // S3.
SdbServiceID = "sdb" // Sdb.
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
ShieldServiceID = "shield" // Shield.
SmsServiceID = "sms" // Sms.
SnowballServiceID = "snowball" // Snowball.
@ -370,6 +376,22 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"autoscaling-plans": service{
Defaults: endpoint{
Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
CredentialScope: credentialScope{
Service: "autoscaling-plans",
},
},
Endpoints: endpoints{
"ap-southeast-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"batch": service{
Endpoints: endpoints{
@ -402,6 +424,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
@ -459,7 +482,11 @@ var awsPartition = partition{
},
},
"cloudhsmv2": service{
Defaults: endpoint{
CredentialScope: credentialScope{
Service: "cloudhsm",
},
},
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-south-1": endpoint{},
@ -577,6 +604,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@ -588,6 +616,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
@ -689,6 +718,8 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
@ -740,6 +771,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@ -1043,10 +1075,13 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@ -1093,10 +1128,11 @@ var awsPartition = partition{
"glue": service{
Endpoints: endpoints{
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
"ap-northeast-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"greengrass": service{
@ -1156,6 +1192,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
@ -1207,6 +1244,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"kinesisvideo": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"kms": service{
Endpoints: endpoints{
@ -1295,6 +1342,44 @@ var awsPartition = partition{
"us-east-1": endpoint{},
},
},
"mediaconvert": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"medialive": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"mediapackage": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-3": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"metering.marketplace": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@ -1489,6 +1574,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
@ -1619,6 +1705,15 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"servicediscovery": service{
Endpoints: endpoints{
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"shield": service{
IsRegionalized: boxedFalse,
Defaults: endpoint{
@ -1636,8 +1731,10 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@ -1651,6 +1748,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
@ -1733,7 +1831,9 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
@ -1912,6 +2012,8 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-1": endpoint{},
@ -2233,6 +2335,12 @@ var awscnPartition = partition{
"cn-northwest-1": endpoint{},
},
},
"sms": service{
Endpoints: endpoints{
"cn-north-1": endpoint{},
},
},
"snowball": service{
Endpoints: endpoints{
@ -2423,6 +2531,18 @@ var awsusgovPartition = partition{
},
},
},
"ecr": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"ecs": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"elasticache": service{
Endpoints: endpoints{
@ -2451,6 +2571,12 @@ var awsusgovPartition = partition{
},
},
},
"es": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"events": service{
Endpoints: endpoints{
View file
@ -224,6 +224,9 @@ func (r *Request) SetContext(ctx aws.Context) {
// WillRetry returns whether the request can be retried.
func (r *Request) WillRetry() bool {
if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
return false
}
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
}
@ -255,6 +258,7 @@ func (r *Request) SetStringBody(s string) {
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.Body = reader
r.BodyStart, _ = reader.Seek(0, 1) // Get the Body's current offset.
r.ResetBody()
}
@ -393,7 +397,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
// of the SDK if they used that field.
//
// Related golang/go#18257
l, err := computeBodyLength(r.Body)
l, err := aws.SeekerLen(r.Body)
if err != nil {
return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
}
@ -411,7 +415,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
// Transfer-Encoding: chunked bodies for these methods.
//
// This would only happen if a aws.ReaderSeekerCloser was used with
// a io.Reader that was not also an io.Seeker.
// a io.Reader that was not also an io.Seeker, or did not implement
// Len() method.
switch r.Operation.HTTPMethod {
case "GET", "HEAD", "DELETE":
body = NoBody
@ -423,42 +428,6 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
return body, nil
}
// Attempts to compute the length of the body of the reader using the
// io.Seeker interface. If the value is not seekable, because it is a
// ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
// If no error occurs the length of the body will be returned.
func computeBodyLength(r io.ReadSeeker) (int64, error) {
seekable := true
// Determine if the seeker is actually seekable. ReaderSeekerCloser
// hides the fact that an io.Reader might not actually be seekable.
switch v := r.(type) {
case aws.ReaderSeekerCloser:
seekable = v.IsSeeker()
case *aws.ReaderSeekerCloser:
seekable = v.IsSeeker()
}
if !seekable {
return -1, nil
}
curOffset, err := r.Seek(0, 1)
if err != nil {
return 0, err
}
endOffset, err := r.Seek(0, 2)
if err != nil {
return 0, err
}
_, err = r.Seek(curOffset, 0)
if err != nil {
return 0, err
}
return endOffset - curOffset, nil
}
// GetBody will return an io.ReadSeeker of the Request's underlying
// input body with a concurrency safe wrapper.
func (r *Request) GetBody() io.ReadSeeker {
View file
@ -142,13 +142,28 @@ func (r *Request) nextPageTokens() []interface{} {
tokens := []interface{}{}
tokenAdded := false
for _, outToken := range r.Operation.OutputTokens {
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(v) > 0 {
tokens = append(tokens, v[0])
tokenAdded = true
} else {
vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(vs) == 0 {
tokens = append(tokens, nil)
continue
}
v := vs[0]
switch tv := v.(type) {
case *string:
if len(aws.StringValue(tv)) == 0 {
tokens = append(tokens, nil)
continue
}
case string:
if len(tv) == 0 {
tokens = append(tokens, nil)
continue
}
}
tokenAdded = true
tokens = append(tokens, v)
}
if !tokenAdded {
return nil
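A standalone sketch of the rule this change introduces: empty-string output tokens are now treated like nil and stop pagination rather than being fed into the next request. The helper name below is hypothetical.

```go
package main

import "fmt"

// tokenOrNil is a hypothetical helper mirroring the new check: nil and
// empty-string tokens both mean "no more pages".
func tokenOrNil(v interface{}) interface{} {
	switch tv := v.(type) {
	case *string:
		if tv == nil || len(*tv) == 0 {
			return nil
		}
	case string:
		if len(tv) == 0 {
			return nil
		}
	}
	return v
}

func main() {
	empty := ""
	fmt.Println(tokenOrNil(&empty)) // <nil>: pagination stops
	next := "FirstToken"
	fmt.Println(tokenOrNil(&next) != nil) // true: continue paging
}
```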
View file
@ -454,78 +454,93 @@ func TestPaginationWithContextNilInput(t *testing.T) {
}
}
type testPageInput struct {
NextToken string
}
type testPageOutput struct {
Value string
NextToken *string
}
func TestPagination_Standalone(t *testing.T) {
expect := []struct {
Value, PrevToken, NextToken string
}{
{"FirstValue", "InitalToken", "FirstToken"},
{"SecondValue", "FirstToken", "SecondToken"},
{"ThirdValue", "SecondToken", ""},
type testPageInput struct {
NextToken *string
}
input := testPageInput{
NextToken: expect[0].PrevToken,
type testPageOutput struct {
Value *string
NextToken *string
}
type testCase struct {
Value, PrevToken, NextToken *string
}
c := awstesting.NewClient()
i := 0
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
r := c.NewRequest(
&request.Operation{
Name: "Operation",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
},
},
&input, &testPageOutput{},
)
// Setup handlers for testing
r.Handlers.Clear()
r.Handlers.Build.PushBack(func(req *request.Request) {
in := req.Params.(*testPageInput)
if e, a := expect[i].PrevToken, in.NextToken; e != a {
t.Errorf("%d, expect NextToken input %q, got %q", i, e, a)
}
})
r.Handlers.Unmarshal.PushBack(func(req *request.Request) {
out := &testPageOutput{
Value: expect[i].Value,
}
if len(expect[i].NextToken) > 0 {
out.NextToken = aws.String(expect[i].NextToken)
}
req.Data = out
})
return r, nil
cases := [][]testCase{
{
testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
testCase{aws.String("ThirdValue"), aws.String("SecondToken"), nil},
},
{
testCase{aws.String("FirstValue"), aws.String("InitalToken"), aws.String("FirstToken")},
testCase{aws.String("SecondValue"), aws.String("FirstToken"), aws.String("SecondToken")},
testCase{aws.String("ThirdValue"), aws.String("SecondToken"), aws.String("")},
},
}
for p.Next() {
data := p.Page().(*testPageOutput)
if e, a := expect[i].Value, data.Value; e != a {
t.Errorf("%d, expect Value to be %q, got %q", i, e, a)
}
if e, a := expect[i].NextToken, aws.StringValue(data.NextToken); e != a {
t.Errorf("%d, expect NextToken to be %q, got %q", i, e, a)
for _, c := range cases {
input := testPageInput{
NextToken: c[0].PrevToken,
}
i++
}
if e, a := len(expect), i; e != a {
t.Errorf("expected to process %d pages, did %d", e, a)
}
if err := p.Err(); err != nil {
t.Fatalf("%d, expected no error, got %v", i, err)
svc := awstesting.NewClient()
i := 0
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
r := svc.NewRequest(
&request.Operation{
Name: "Operation",
Paginator: &request.Paginator{
InputTokens: []string{"NextToken"},
OutputTokens: []string{"NextToken"},
},
},
&input, &testPageOutput{},
)
// Setup handlers for testing
r.Handlers.Clear()
r.Handlers.Build.PushBack(func(req *request.Request) {
if e, a := len(c), i+1; a > e {
t.Fatalf("expect no more than %d requests, got %d", e, a)
}
in := req.Params.(*testPageInput)
if e, a := aws.StringValue(c[i].PrevToken), aws.StringValue(in.NextToken); e != a {
t.Errorf("%d, expect NextToken input %q, got %q", i, e, a)
}
})
r.Handlers.Unmarshal.PushBack(func(req *request.Request) {
out := &testPageOutput{
Value: c[i].Value,
}
if c[i].NextToken != nil {
next := *c[i].NextToken
out.NextToken = aws.String(next)
}
req.Data = out
})
return r, nil
},
}
for p.Next() {
data := p.Page().(*testPageOutput)
if e, a := aws.StringValue(c[i].Value), aws.StringValue(data.Value); e != a {
t.Errorf("%d, expect Value to be %q, got %q", i, e, a)
}
if e, a := aws.StringValue(c[i].NextToken), aws.StringValue(data.NextToken); e != a {
t.Errorf("%d, expect NextToken to be %q, got %q", i, e, a)
}
i++
}
if e, a := len(c), i; e != a {
t.Errorf("expected to process %d pages, did %d", e, a)
}
if err := p.Err(); err != nil {
t.Fatalf("%d, expected no error, got %v", i, err)
}
}
}
View file
@ -2,6 +2,7 @@ package request
import (
"bytes"
"io"
"net/http"
"strings"
"testing"
@ -25,21 +26,70 @@ func TestResetBody_WithBodyContents(t *testing.T) {
}
}
func TestResetBody_ExcludeUnseekableBodyByMethod(t *testing.T) {
type mockReader struct{}
func (mockReader) Read([]byte) (int, error) {
return 0, io.EOF
}
func TestResetBody_ExcludeEmptyUnseekableBodyByMethod(t *testing.T) {
cases := []struct {
Method string
Body io.ReadSeeker
IsNoBody bool
}{
{"GET", true},
{"HEAD", true},
{"DELETE", true},
{"PUT", false},
{"PATCH", false},
{"POST", false},
{
Method: "GET",
IsNoBody: true,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "HEAD",
IsNoBody: true,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "DELETE",
IsNoBody: true,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "PUT",
IsNoBody: false,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "PATCH",
IsNoBody: false,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "POST",
IsNoBody: false,
Body: aws.ReadSeekCloser(mockReader{}),
},
{
Method: "GET",
IsNoBody: false,
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("abc"))),
},
{
Method: "GET",
IsNoBody: true,
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
},
{
Method: "POST",
IsNoBody: false,
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("abc"))),
},
{
Method: "POST",
IsNoBody: true,
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
},
}
reader := aws.ReadSeekCloser(bytes.NewBuffer([]byte("abc")))
for i, c := range cases {
r := Request{
HTTPRequest: &http.Request{},
@ -47,8 +97,7 @@ func TestResetBody_ExcludeUnseekableBodyByMethod(t *testing.T) {
HTTPMethod: c.Method,
},
}
r.SetReaderBody(reader)
r.SetReaderBody(c.Body)
if a, e := r.HTTPRequest.Body == NoBody, c.IsNoBody; a != e {
t.Errorf("%d, expect body to be set to noBody(%t), but was %t", i, e, a)
View file
@ -22,13 +22,13 @@ import (
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/private/protocol/rest"
"github.com/aws/aws-sdk-go/aws/defaults"
)
type testData struct {
@ -893,7 +893,6 @@ func TestRequest_Presign(t *testing.T) {
},
SignerFn: func(r *request.Request) {
r.HTTPRequest.URL = mustParseURL("https://endpoint/presignedURL")
fmt.Println("url", r.HTTPRequest.URL.String())
if r.NotHoist {
r.Error = fmt.Errorf("expect NotHoist to be cleared")
}
@ -961,7 +960,7 @@ func TestRequest_Presign(t *testing.T) {
}
}
}
func TestNew_EndpointWithDefaultPort(t *testing.T) {
endpoint := "https://estest.us-east-1.es.amazonaws.com:443"
expectedRequestHost := "estest.us-east-1.es.amazonaws.com"
@ -983,7 +982,7 @@ func TestNew_EndpointWithDefaultPort(t *testing.T) {
func TestSanitizeHostForHeader(t *testing.T) {
cases := []struct {
url string
url string
expectedRequestHost string
}{
{"https://estest.us-east-1.es.amazonaws.com:443", "estest.us-east-1.es.amazonaws.com"},
@ -999,6 +998,91 @@ func TestSanitizeHostForHeader(t *testing.T) {
if h := r.Host; h != c.expectedRequestHost {
t.Errorf("expect %v host, got %q", c.expectedRequestHost, h)
}
}
}
}
}
func TestRequestWillRetry_ByBody(t *testing.T) {
svc := awstesting.NewClient()
cases := []struct {
WillRetry bool
HTTPMethod string
Body io.ReadSeeker
IsReqNoBody bool
}{
{
WillRetry: true,
HTTPMethod: "GET",
Body: bytes.NewReader([]byte{}),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "GET",
Body: bytes.NewReader(nil),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: bytes.NewReader([]byte("abc123")),
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewReader([]byte("abc123"))),
},
{
WillRetry: true,
HTTPMethod: "GET",
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
IsReqNoBody: true,
},
{
WillRetry: true,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewBuffer(nil)),
IsReqNoBody: true,
},
{
WillRetry: false,
HTTPMethod: "POST",
Body: aws.ReadSeekCloser(bytes.NewBuffer([]byte("abc123"))),
},
}
for i, c := range cases {
req := svc.NewRequest(&request.Operation{
Name: "Operation",
HTTPMethod: c.HTTPMethod,
HTTPPath: "/",
}, nil, nil)
req.SetReaderBody(c.Body)
req.Build()
req.Error = fmt.Errorf("some error")
req.Retryable = aws.Bool(true)
req.HTTPResponse = &http.Response{
StatusCode: 500,
}
if e, a := c.IsReqNoBody, request.NoBody == req.HTTPRequest.Body; e != a {
t.Errorf("%d, expect request to be no body, %t, got %t, %T", i, e, a, req.HTTPRequest.Body)
}
if e, a := c.WillRetry, req.WillRetry(); e != a {
t.Errorf("%d, expect %t willRetry, got %t", i, e, a)
}
if req.Error == nil {
t.Fatalf("%d, expect error, got none", i)
}
if e, a := "some error", req.Error.Error(); !strings.Contains(a, e) {
t.Errorf("%d, expect %q error in %q", i, e, a)
}
if e, a := 0, req.RetryCount; e != a {
t.Errorf("%d, expect retry count to be %d, got %d", i, e, a)
}
}
}
View file
@ -5,6 +5,7 @@ import (
"strconv"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
)
// EnvProviderName provides a name of the provider when config is loaded from environment.
@ -176,6 +177,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
if len(cfg.SharedCredentialsFile) == 0 {
cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
}
if len(cfg.SharedConfigFile) == 0 {
cfg.SharedConfigFile = defaults.SharedConfigFilename()
}
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
return cfg
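A hedged sketch of the fallback added above: explicit environment variables win; otherwise the platform defaults are used.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	credsFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
	if len(credsFile) == 0 {
		credsFile = defaults.SharedCredentialsFilename() // e.g. ~/.aws/credentials
	}
	configFile := os.Getenv("AWS_CONFIG_FILE")
	if len(configFile) == 0 {
		configFile = defaults.SharedConfigFilename() // e.g. ~/.aws/config
	}
	fmt.Println(credsFile, configFile)
}
```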
View file
@ -7,6 +7,7 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
func TestLoadEnvConfig_Creds(t *testing.T) {
@ -105,6 +106,8 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
@ -116,6 +119,8 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
@ -128,7 +133,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
@ -136,6 +143,10 @@ func TestLoadEnvConfig(t *testing.T) {
"AWS_DEFAULT_REGION": "default_region",
"AWS_DEFAULT_PROFILE": "default_profile",
},
Config: envConfig{
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
Env: map[string]string{
@ -145,7 +156,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
@ -155,7 +168,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
@ -168,7 +183,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
@ -182,7 +199,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "region", Profile: "profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
@ -193,7 +212,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
@ -205,7 +226,9 @@ func TestLoadEnvConfig(t *testing.T) {
},
Config: envConfig{
Region: "default_region", Profile: "default_profile",
EnableSharedConfig: true,
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
@ -214,7 +237,9 @@ func TestLoadEnvConfig(t *testing.T) {
"AWS_CA_BUNDLE": "custom_ca_bundle",
},
Config: envConfig{
CustomCABundle: "custom_ca_bundle",
CustomCABundle: "custom_ca_bundle",
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
},
{
@ -222,8 +247,10 @@ func TestLoadEnvConfig(t *testing.T) {
"AWS_CA_BUNDLE": "custom_ca_bundle",
},
Config: envConfig{
CustomCABundle: "custom_ca_bundle",
EnableSharedConfig: true,
CustomCABundle: "custom_ca_bundle",
EnableSharedConfig: true,
SharedCredentialsFile: shareddefaults.SharedCredentialsFilename(),
SharedConfigFile: shareddefaults.SharedConfigFilename(),
},
UseSharedConfigCall: true,
},
View file
@ -58,7 +58,12 @@ func New(cfgs ...*aws.Config) *Session {
envCfg := loadEnvConfig()
if envCfg.EnableSharedConfig {
s, err := newSession(Options{}, envCfg, cfgs...)
var cfg aws.Config
cfg.MergeIn(cfgs...)
s, err := NewSessionWithOptions(Options{
Config: cfg,
SharedConfigState: SharedConfigEnable,
})
if err != nil {
// Old session.New expected all errors to be discovered when
// a request is made, and would report the errors then. This
@ -243,13 +248,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
envCfg.EnableSharedConfig = true
}
if len(envCfg.SharedCredentialsFile) == 0 {
envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
}
if len(envCfg.SharedConfigFile) == 0 {
envCfg.SharedConfigFile = defaults.SharedConfigFilename()
}
// Only use AWS_CA_BUNDLE if session option is not provided.
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
f, err := os.Open(envCfg.CustomCABundle)
View file
@ -341,7 +341,9 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
ctx.sanitizeHostForHeader()
ctx.assignAmzQueryValues()
ctx.build(v4.DisableHeaderHoisting)
if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
return nil, err
}
// If the request is not presigned the body should be attached to it. This
// prevents the confusion of wanting to send a signed request without
@ -503,11 +505,13 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
v4.Logger.Log(msg)
}
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends
ctx.buildBodyDigest()
if err := ctx.buildBodyDigest(); err != nil {
return err
}
unsignedHeaders := ctx.Request.Header
if ctx.isPresign {
@ -535,6 +539,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
}
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
return nil
}
func (ctx *signingCtx) buildTime() {
@ -661,7 +667,7 @@ func (ctx *signingCtx) buildSignature() {
ctx.signature = hex.EncodeToString(signature)
}
func (ctx *signingCtx) buildBodyDigest() {
func (ctx *signingCtx) buildBodyDigest() error {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
@ -669,6 +675,9 @@ func (ctx *signingCtx) buildBodyDigest() {
} else if ctx.Body == nil {
hash = emptyStringSHA256
} else {
if !aws.IsReaderSeekable(ctx.Body) {
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
}
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
}
if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
@ -676,6 +685,8 @@ func (ctx *signingCtx) buildBodyDigest() {
}
}
ctx.bodyDigest = hash
return nil
}
// isRequestSigned returns whether the request is currently signed or presigned
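A hedged sketch tying the change above together: signing a request whose body cannot be seeked now returns an error instead of silently mis-signing, unless the payload is explicitly unsigned or a precomputed hash is supplied.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// bytes.Buffer is not an io.Seeker, so this wrapper is unseekable.
	body := aws.ReadSeekCloser(bytes.NewBuffer([]byte("hello")))
	req, _ := http.NewRequest("POST", "https://mock-service.us-east-1.amazonaws.com", body)

	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	if _, err := signer.Sign(req, body, "mock-service", "us-east-1", time.Now()); err != nil {
		fmt.Println("expected error:", err) // cannot use unseekable request body
	}
}
```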
View file
@ -7,6 +7,7 @@ import (
"net/http"
"net/http/httptest"
"reflect"
"strconv"
"strings"
"testing"
"time"
@ -61,17 +62,42 @@ func TestStripExcessHeaders(t *testing.T) {
}
func buildRequest(serviceName, region, body string) (*http.Request, io.ReadSeeker) {
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
reader := strings.NewReader(body)
req, _ := http.NewRequest("POST", endpoint, reader)
return buildRequestWithBodyReader(serviceName, region, reader)
}
func buildRequestWithBodyReader(serviceName, region string, body io.Reader) (*http.Request, io.ReadSeeker) {
var bodyLen int
type lenner interface {
Len() int
}
if lr, ok := body.(lenner); ok {
bodyLen = lr.Len()
}
endpoint := "https://" + serviceName + "." + region + ".amazonaws.com"
req, _ := http.NewRequest("POST", endpoint, body)
req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()"
req.Header.Add("X-Amz-Target", "prefix.Operation")
req.Header.Add("Content-Type", "application/x-amz-json-1.0")
req.Header.Add("Content-Length", string(len(body)))
req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
req.Header.Set("X-Amz-Target", "prefix.Operation")
req.Header.Set("Content-Type", "application/x-amz-json-1.0")
if bodyLen > 0 {
req.Header.Set("Content-Length", strconv.Itoa(bodyLen))
}
req.Header.Set("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)")
req.Header.Add("X-Amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
req.Header.Add("X-amz-Meta-Other-Header_With_Underscore", "some-value=!@#$%^&* (+)")
return req, reader
var seeker io.ReadSeeker
if sr, ok := body.(io.ReadSeeker); ok {
seeker = sr
} else {
seeker = aws.ReadSeekCloser(body)
}
return req, seeker
}
func buildSigner() Signer {
@ -101,7 +127,7 @@ func TestPresignRequest(t *testing.T) {
expectedDate := "19700101T000000Z"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedSig := "ea7856749041f727690c580569738282e99c79355fe0d8f125d3b5535d2ece83"
expectedSig := "122f0b9e091e4ba84286097e2b3404a1f1f4c4aad479adda95b7dff0ccbe5581"
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
expectedTarget := "prefix.Operation"
@ -135,7 +161,7 @@ func TestPresignBodyWithArrayRequest(t *testing.T) {
expectedDate := "19700101T000000Z"
expectedHeaders := "content-length;content-type;host;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore"
expectedSig := "fef6002062400bbf526d70f1a6456abc0fb2e213fe1416012737eebd42a62924"
expectedSig := "e3ac55addee8711b76c6d608d762cff285fe8b627a057f8b5ec9268cf82c08b1"
expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request"
expectedTarget := "prefix.Operation"
@ -166,14 +192,14 @@ func TestSignRequest(t *testing.T) {
signer.Sign(req, body, "dynamodb", "us-east-1", time.Unix(0, 0))
expectedDate := "19700101T000000Z"
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=ea766cabd2ec977d955a3c2bae1ae54f4515d70752f2207618396f20aa85bd21"
expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-amz-meta-other-header;x-amz-meta-other-header_with_underscore;x-amz-security-token;x-amz-target, Signature=a518299330494908a70222cec6899f6f32f297f8595f6df1776d998936652ad9"
q := req.Header
if e, a := expectedSig, q.Get("Authorization"); e != a {
t.Errorf("expect %v, got %v", e, a)
t.Errorf("expect\n%v\nactual\n%v\n", e, a)
}
if e, a := expectedDate, q.Get("X-Amz-Date"); e != a {
t.Errorf("expect %v, got %v", e, a)
t.Errorf("expect\n%v\nactual\n%v\n", e, a)
}
}
@ -207,6 +233,53 @@ func TestPresignEmptyBodyS3(t *testing.T) {
}
}
func TestSignUnseekableBody(t *testing.T) {
req, body := buildRequestWithBodyReader("mock-service", "mock-region", bytes.NewBuffer([]byte("hello")))
signer := buildSigner()
_, err := signer.Sign(req, body, "mock-service", "mock-region", time.Now())
if err == nil {
t.Fatalf("expect error signing request")
}
if e, a := "unseekable request body", err.Error(); !strings.Contains(a, e) {
t.Errorf("expect %q to be in %q", e, a)
}
}
func TestSignUnsignedPayloadUnseekableBody(t *testing.T) {
req, body := buildRequestWithBodyReader("mock-service", "mock-region", bytes.NewBuffer([]byte("hello")))
signer := buildSigner()
signer.UnsignedPayload = true
_, err := signer.Sign(req, body, "mock-service", "mock-region", time.Now())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
hash := req.Header.Get("X-Amz-Content-Sha256")
if e, a := "UNSIGNED-PAYLOAD", hash; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSignPreComputedHashUnseekableBody(t *testing.T) {
req, body := buildRequestWithBodyReader("mock-service", "mock-region", bytes.NewBuffer([]byte("hello")))
signer := buildSigner()
req.Header.Set("X-Amz-Content-Sha256", "some-content-sha256")
_, err := signer.Sign(req, body, "mock-service", "mock-region", time.Now())
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
hash := req.Header.Get("X-Amz-Content-Sha256")
if e, a := "some-content-sha256", hash; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSignPrecomputedBodyChecksum(t *testing.T) {
req, body := buildRequest("dynamodb", "us-east-1", "hello")
req.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED")
View file
@ -22,6 +22,22 @@ type ReaderSeekerCloser struct {
r io.Reader
}
// IsReaderSeekable returns whether the underlying reader type can be seeked. An
// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
// type.
func IsReaderSeekable(r io.Reader) bool {
switch v := r.(type) {
case ReaderSeekerCloser:
return v.IsSeeker()
case *ReaderSeekerCloser:
return v.IsSeeker()
case io.ReadSeeker:
return true
default:
return false
}
}
// Read reads from the reader up to size of p. The number of bytes read, and
// error if it occurred will be returned.
//
@ -56,6 +72,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool {
return ok
}
// HasLen returns the length of the underlying reader if the value implements
// the Len() int method.
func (r ReaderSeekerCloser) HasLen() (int, bool) {
type lenner interface {
Len() int
}
if lr, ok := r.r.(lenner); ok {
return lr.Len(), true
}
return 0, false
}
// GetLen returns the length of the bytes remaining in the underlying reader.
// Checks first for Len(), then io.Seeker to determine the size of the
// underlying reader.
//
// Will return -1 if the length cannot be determined.
func (r ReaderSeekerCloser) GetLen() (int64, error) {
if l, ok := r.HasLen(); ok {
return int64(l), nil
}
if s, ok := r.r.(io.Seeker); ok {
return seekerLen(s)
}
return -1, nil
}
// SeekerLen attempts to get the number of bytes remaining at the seeker's
// current position. Returns the number of bytes remaining or error.
func SeekerLen(s io.Seeker) (int64, error) {
// Determine if the seeker is actually seekable. ReaderSeekerCloser
// hides the fact that an io.Reader might not actually be seekable.
switch v := s.(type) {
case ReaderSeekerCloser:
return v.GetLen()
case *ReaderSeekerCloser:
return v.GetLen()
}
return seekerLen(s)
}
func seekerLen(s io.Seeker) (int64, error) {
curOffset, err := s.Seek(0, 1)
if err != nil {
return 0, err
}
endOffset, err := s.Seek(0, 2)
if err != nil {
return 0, err
}
_, err = s.Seek(curOffset, 0)
if err != nil {
return 0, err
}
return endOffset - curOffset, nil
}
// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
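A hedged usage sketch of the new helper: `SeekerLen` reports the bytes remaining from the seeker's current offset without consuming the reader.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	r := strings.NewReader("hello world")
	r.Seek(6, 0) // move past "hello "

	n, err := aws.SeekerLen(r)
	if err != nil {
		fmt.Println("len error:", err)
		return
	}
	fmt.Println(n) // 5 bytes remaining: "world"
}
```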
View file
@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.12.61"
const SDKVersion = "1.13.4"
Some files were not shown because too many files have changed in this diff.