Bump google.golang.org/grpc from 1.53.0 to 1.56.3

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.53.0 to 1.56.3.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.53.0...v1.56.3)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>

This commit is contained in: parent 66bedcf1a3, commit 32316367c8

67 changed files with 2810 additions and 1121 deletions
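
grpc is an indirect dependency here, so one way to confirm which version actually ends up in a built binary is to read the module information embedded by the Go toolchain. This is an illustrative sketch, not part of the commit; it uses only the standard library, and after this bump it should report v1.56.3.

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info embedded in this binary")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "google.golang.org/grpc" {
			// With this change applied, the version printed should be v1.56.3.
			fmt.Println(dep.Path, dep.Version)
		}
	}
}
```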

go.mod (14 changed lines)

@@ -27,7 +27,7 @@ require (
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.6.1
 	golang.org/x/crypto v0.14.0
-	golang.org/x/oauth2 v0.6.0
+	golang.org/x/oauth2 v0.7.0
 	google.golang.org/api v0.114.0
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
@@ -35,9 +35,9 @@ require (
 
 require (
 	cloud.google.com/go v0.110.0 // indirect
-	cloud.google.com/go/compute v1.18.0 // indirect
+	cloud.google.com/go/compute v1.19.1 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v0.12.0 // indirect
+	cloud.google.com/go/iam v0.13.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
@@ -49,7 +49,7 @@ require (
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
@@ -77,7 +77,7 @@ require (
 	golang.org/x/text v0.13.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 // indirect
-	google.golang.org/grpc v1.53.0 // indirect
-	google.golang.org/protobuf v1.29.1 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+	google.golang.org/grpc v1.56.3 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 )

go.sum (27 changed lines)

@@ -21,14 +21,14 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
-cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE=
-cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
 cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
@@ -156,8 +156,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -414,8 +415,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
-golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -591,8 +592,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230320184635-7606e756e683 h1:khxVcsk/FhnzxMKOyD+TDGwjbEOpcPuIpmafPGFmhMA=
-google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -606,8 +607,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
 google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
-google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
+google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -620,8 +621,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM=
-google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

vendor/cloud.google.com/go/compute/internal/version.go (2 changed lines, generated, vendored)

@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.18.0"
+const Version = "1.19.1"

vendor/cloud.google.com/go/iam/CHANGES.md (7 changed lines, generated, vendored)

@@ -1,5 +1,12 @@
 # Changes
 
+## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15)
+
+
+### Features
+
+* **iam:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd))
+
 ## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17)
 
 

vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go (2 changed lines, generated, vendored)

@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.26.0
-// protoc v3.21.12
+// protoc v3.21.9
 // source: google/iam/v1/iam_policy.proto
 
 package iampb

vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go (2 changed lines, generated, vendored)

@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.26.0
-// protoc v3.21.12
+// protoc v3.21.9
 // source: google/iam/v1/options.proto
 
 package iampb

vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go (2 changed lines, generated, vendored)

@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.26.0
-// protoc v3.21.12
+// protoc v3.21.9
 // source: google/iam/v1/policy.proto
 
 package iampb

vendor/github.com/golang/protobuf/jsonpb/decode.go (8 changed lines, generated, vendored)

@@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
 }
 
 func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
+	if fd.Cardinality() == protoreflect.Repeated {
+		return false
+	}
 	if md := fd.Message(); md != nil {
-		return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
+		return md.FullName() == "google.protobuf.Value"
+	}
+	if ed := fd.Enum(); ed != nil {
+		return ed.FullName() == "google.protobuf.NullValue"
 	}
 	return false
 }
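
For context, the helper above decides during JSON decoding whether a field is a singular google.protobuf.Value (or NullValue). A rough, hedged sketch of the kind of input that exercises that path, using the well-known Struct/Value types; the JSON literal is made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// google.protobuf.Struct fields are map<string, google.protobuf.Value>,
	// so nulls, numbers, strings and lists all decode through Value handling.
	var s structpb.Struct
	if err := jsonpb.UnmarshalString(`{"a": null, "b": [1, "x"]}`, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Fields["b"].GetListValue().Values[0].GetNumberValue()) // 1
}
```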

vendor/golang.org/x/oauth2/README.md (12 changed lines, generated, vendored)

@@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
 * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
 * [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
 
-## Policy for new packages
+## Policy for new endpoints
 
 We no longer accept new provider-specific packages in this repo if all
 they do is add a single endpoint variable. If you just want to add a
@@ -29,8 +29,12 @@ package.
 
 ## Report Issues / Send Patches
 
-This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
-
 The main issue tracker for the oauth2 repository is located at
 https://github.com/golang/oauth2/issues.
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html. In particular:
+
+* Excluding trivial changes, all contributions should be connected to an existing issue.
+* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
+* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).

vendor/golang.org/x/oauth2/google/default.go (16 changed lines, generated, vendored)

@@ -13,12 +13,15 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"time"
 
 	"cloud.google.com/go/compute/metadata"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/authhandler"
 )
 
+const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
+
 // Credentials holds Google credentials, including "Application Default Credentials".
 // For more details, see:
 // https://developers.google.com/accounts/docs/application-default-credentials
@@ -66,6 +69,14 @@ type CredentialsParams struct {
 	// The OAuth2 TokenURL default override. This value overrides the default TokenURL,
 	// unless explicitly specified by the credentials config file. Optional.
 	TokenURL string
+
+	// EarlyTokenRefresh is the amount of time before a token expires that a new
+	// token will be preemptively fetched. If unset the default value is 10
+	// seconds.
+	//
+	// Note: This option is currently only respected when using credentials
+	// fetched from the GCE metadata server.
+	EarlyTokenRefresh time.Duration
 }
 
 func (params CredentialsParams) deepCopy() CredentialsParams {
@@ -153,13 +164,12 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar
 		id, _ := metadata.ProjectID()
 		return &Credentials{
 			ProjectID:   id,
-			TokenSource: ComputeTokenSource("", params.Scopes...),
+			TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
 		}, nil
 	}
 
 	// None are found; return helpful error.
-	const url = "https://developers.google.com/accounts/docs/application-default-credentials"
-	return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+	return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL)
 }
 
 // FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes.
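
A hedged usage sketch for the new knob (not part of the commit): the only fields assumed below are Scopes, which already existed, and the EarlyTokenRefresh field added in this hunk. Per the comment above, the option currently only takes effect for credentials served by the GCE metadata server.

```go
package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentialsWithParams(ctx, google.CredentialsParams{
		Scopes:            []string{"https://www.googleapis.com/auth/cloud-platform"},
		EarlyTokenRefresh: 30 * time.Second, // refetch metadata-server tokens 30s before they expire
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.TokenSource.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires at:", tok.Expiry)
}
```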

vendor/golang.org/x/oauth2/google/doc.go (61 changed lines, generated, vendored)

@@ -26,7 +26,7 @@
 //
 // Using workload identity federation, your application can access Google Cloud
 // resources from Amazon Web Services (AWS), Microsoft Azure or any identity
-// provider that supports OpenID Connect (OIDC).
+// provider that supports OpenID Connect (OIDC) or SAML 2.0.
 // Traditionally, applications running outside Google Cloud have used service
 // account keys to access Google Cloud resources. Using identity federation,
 // you can allow your workload to impersonate a service account.
@@ -36,26 +36,70 @@
 // Follow the detailed instructions on how to configure Workload Identity Federation
 // in various platforms:
 //
-// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/access-resources-aws
-// Microsoft Azure: https://cloud.google.com/iam/docs/access-resources-azure
-// OIDC identity provider: https://cloud.google.com/iam/docs/access-resources-oidc
+// Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws
+// Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure
+// OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc
+// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml
 //
 // For OIDC and SAML providers, the library can retrieve tokens in three ways:
 // from a local file location (file-sourced credentials), from a server
 // (URL-sourced credentials), or from a local executable (executable-sourced
 // credentials).
 // For file-sourced credentials, a background process needs to be continuously
-// refreshing the file location with a new OIDC token prior to expiration.
+// refreshing the file location with a new OIDC/SAML token prior to expiration.
 // For tokens with one hour lifetimes, the token needs to be updated in the file
 // every hour. The token can be stored directly as plain text or in JSON format.
 // For URL-sourced credentials, a local server needs to host a GET endpoint to
-// return the OIDC token. The response can be in plain text or JSON.
+// return the OIDC/SAML token. The response can be in plain text or JSON.
 // Additional required request headers can also be specified.
 // For executable-sourced credentials, an application needs to be available to
-// output the OIDC token and other information in a JSON format.
+// output the OIDC/SAML token and other information in a JSON format.
 // For more information on how these work (and how to implement
 // executable-sourced credentials), please check out:
-// https://cloud.google.com/iam/docs/using-workload-identity-federation#oidc
+// https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration
+//
+// Note that this library does not perform any validation on the token_url, token_info_url,
+// or service_account_impersonation_url fields of the credential configuration.
+// It is not recommended to use a credential configuration that you did not generate with
+// the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
+//
+// # Workforce Identity Federation
+//
+// Workforce identity federation lets you use an external identity provider (IdP) to
+// authenticate and authorize a workforce—a group of users, such as employees, partners,
+// and contractors—using IAM, so that the users can access Google Cloud services.
+// Workforce identity federation extends Google Cloud's identity capabilities to support
+// syncless, attribute-based single sign on.
+//
+// With workforce identity federation, your workforce can access Google Cloud resources
+// using an external identity provider (IdP) that supports OpenID Connect (OIDC) or
+// SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation
+// Services (AD FS), Okta, and others.
+//
+// Follow the detailed instructions on how to configure Workload Identity Federation
+// in various platforms:
+//
+// Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad
+// Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta
+// OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc
+// SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml
+//
+// For workforce identity federation, the library can retrieve tokens in three ways:
+// from a local file location (file-sourced credentials), from a server
+// (URL-sourced credentials), or from a local executable (executable-sourced
+// credentials).
+// For file-sourced credentials, a background process needs to be continuously
+// refreshing the file location with a new OIDC/SAML token prior to expiration.
+// For tokens with one hour lifetimes, the token needs to be updated in the file
+// every hour. The token can be stored directly as plain text or in JSON format.
+// For URL-sourced credentials, a local server needs to host a GET endpoint to
+// return the OIDC/SAML token. The response can be in plain text or JSON.
+// Additional required request headers can also be specified.
+// For executable-sourced credentials, an application needs to be available to
+// output the OIDC/SAML token and other information in a JSON format.
+// For more information on how these work (and how to implement
+// executable-sourced credentials), please check out:
+// https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
 //
 // Note that this library does not perform any validation on the token_url, token_info_url,
 // or service_account_impersonation_url fields of the credential configuration.
@@ -86,5 +130,4 @@
 // same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
 // JWTConfigFromJSON, but the Credentials may contain additional information
 // that is useful is some circumstances.
-//
 package google // import "golang.org/x/oauth2/google"

vendor/golang.org/x/oauth2/google/google.go (6 changed lines, generated, vendored)

@@ -231,7 +231,11 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar
 // Further information about retrieving access tokens from the GCE metadata
 // server can be found at https://cloud.google.com/compute/docs/authentication.
 func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource {
-	return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope})
+	return computeTokenSource(account, 0, scope...)
 }
 
+func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry)
+}
+
 type computeSource struct {
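
The exported entry point is unchanged, so existing callers keep the package's default 10-second expiry buffer. A minimal sketch of that unchanged call path (illustrative only; it needs to run on GCE or against an emulated metadata server to actually mint a token):

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2/google"
)

func main() {
	// ComputeTokenSource now delegates to computeTokenSource(account, 0, ...),
	// i.e. earlyExpiry = 0, which falls back to the package default buffer.
	ts := google.ComputeTokenSource("", "https://www.googleapis.com/auth/cloud-platform")
	tok, err := ts.Token()
	if err != nil {
		fmt.Println("token fetch failed (expected when not on GCE):", err)
		return
	}
	fmt.Println("expires:", tok.Expiry)
}
```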

vendor/golang.org/x/oauth2/oauth2.go (33 changed lines, generated, vendored)

@@ -16,6 +16,7 @@ import (
 	"net/url"
 	"strings"
 	"sync"
+	"time"
 
 	"golang.org/x/oauth2/internal"
 )
@@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
 //
 // State is a token to protect the user from CSRF attacks. You must
 // always provide a non-empty string and validate that it matches the
-// the state query parameter on your redirect callback.
+// state query parameter on your redirect callback.
 // See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
 //
 // Opts may include AccessTypeOnline or AccessTypeOffline, as well
@@ -290,6 +291,8 @@ type reuseTokenSource struct {
 
 	mu sync.Mutex // guards t
 	t  *Token
+
+	expiryDelta time.Duration
 }
 
 // Token returns the current token if it's still valid, else will
@@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
 	if err != nil {
 		return nil, err
 	}
+	t.expiryDelta = s.expiryDelta
 	s.t = t
 	return t, nil
 }
@@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
 		new: src,
 	}
 }
+
+// ReuseTokenSource returns a TokenSource that acts in the same manner as the
+// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// configurable. The expiration time of a token is calculated as
+// t.Expiry.Add(-earlyExpiry).
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly, but set the expiryDelta to earlyExpiry,
+			// so the behavior matches what the user expects.
+			rt.expiryDelta = earlyExpiry
+			return rt
+		}
+		src = rt.new
+	}
+	if t != nil {
+		t.expiryDelta = earlyExpiry
+	}
+	return &reuseTokenSource{
+		t:           t,
+		new:         src,
+		expiryDelta: earlyExpiry,
+	}
+}
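
A short usage sketch for the new exported helper (illustrative, not from the commit): wrap any TokenSource so cached tokens are considered expired earlyExpiry before their real expiry, matching the t.Expiry.Add(-earlyExpiry) rule documented above.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	base := oauth2.StaticTokenSource(&oauth2.Token{
		AccessToken: "example-access-token",
		Expiry:      time.Now().Add(time.Hour),
	})
	// The wrapper re-fetches from base once the cached token is within
	// 30 seconds of its Expiry, instead of the default 10 seconds.
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, base, 30*time.Second)
	tok, err := ts.Token()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("treated as valid until ~", tok.Expiry.Add(-30*time.Second))
}
```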

vendor/golang.org/x/oauth2/token.go (14 changed lines, generated, vendored)

@@ -16,10 +16,10 @@ import (
 	"golang.org/x/oauth2/internal"
 )
 
-// expiryDelta determines how earlier a token should be considered
+// defaultExpiryDelta determines how earlier a token should be considered
 // expired than its actual expiration time. It is used to avoid late
 // expirations due to client-server time mismatches.
-const expiryDelta = 10 * time.Second
+const defaultExpiryDelta = 10 * time.Second
 
 // Token represents the credentials used to authorize
 // the requests to access protected resources on the OAuth 2.0
@@ -52,6 +52,11 @@ type Token struct {
 	// raw optionally contains extra metadata from the server
 	// when updating a token.
 	raw interface{}
+
+	// expiryDelta is used to calculate when a token is considered
+	// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
+	// is used.
+	expiryDelta time.Duration
 }
 
 // Type returns t.TokenType if non-empty, else "Bearer".
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
 	if t.Expiry.IsZero() {
 		return false
 	}
+
+	expiryDelta := defaultExpiryDelta
+	if t.expiryDelta != 0 {
+		expiryDelta = t.expiryDelta
+	}
 	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
 }
 
295
vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
generated
vendored
295
vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
generated
vendored
|
@ -795,6 +795,31 @@ type DotnetSettings struct {
|
|||
|
||||
// Some settings.
|
||||
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
|
||||
// Map from original service names to renamed versions.
|
||||
// This is used when the default generated types
|
||||
// would cause a naming conflict. (Neither name is
|
||||
// fully-qualified.)
|
||||
// Example: Subscriber to SubscriberServiceApi.
|
||||
RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// Map from full resource types to the effective short name
|
||||
// for the resource. This is used when otherwise resource
|
||||
// named from different services would cause naming collisions.
|
||||
// Example entry:
|
||||
// "datalabeling.googleapis.com/Dataset": "DataLabelingDataset"
|
||||
RenamedResources map[string]string `protobuf:"bytes,3,rep,name=renamed_resources,json=renamedResources,proto3" json:"renamed_resources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// List of full resource types to ignore during generation.
|
||||
// This is typically used for API-specific Location resources,
|
||||
// which should be handled by the generator as if they were actually
|
||||
// the common Location resources.
|
||||
// Example entry: "documentai.googleapis.com/Location"
|
||||
IgnoredResources []string `protobuf:"bytes,4,rep,name=ignored_resources,json=ignoredResources,proto3" json:"ignored_resources,omitempty"`
|
||||
// Namespaces which must be aliased in snippets due to
|
||||
// a known (but non-generator-predictable) naming collision
|
||||
ForcedNamespaceAliases []string `protobuf:"bytes,5,rep,name=forced_namespace_aliases,json=forcedNamespaceAliases,proto3" json:"forced_namespace_aliases,omitempty"`
|
||||
// Method signatures (in the form "service.method(signature)")
|
||||
// which are provided separately, so shouldn't be generated.
|
||||
// Snippets *calling* these methods are still generated, however.
|
||||
HandwrittenSignatures []string `protobuf:"bytes,6,rep,name=handwritten_signatures,json=handwrittenSignatures,proto3" json:"handwritten_signatures,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) Reset() {
|
||||
|
@ -836,6 +861,41 @@ func (x *DotnetSettings) GetCommon() *CommonLanguageSettings {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) GetRenamedServices() map[string]string {
|
||||
if x != nil {
|
||||
return x.RenamedServices
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) GetRenamedResources() map[string]string {
|
||||
if x != nil {
|
||||
return x.RenamedResources
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) GetIgnoredResources() []string {
|
||||
if x != nil {
|
||||
return x.IgnoredResources
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) GetForcedNamespaceAliases() []string {
|
||||
if x != nil {
|
||||
return x.ForcedNamespaceAliases
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *DotnetSettings) GetHandwrittenSignatures() []string {
|
||||
if x != nil {
|
||||
return x.HandwrittenSignatures
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Settings for Ruby client libraries.
|
||||
type RubySettings struct {
|
||||
state protoimpl.MessageState
|
||||
|
@ -1037,7 +1097,7 @@ type MethodSettings_LongRunning struct {
|
|||
func (x *MethodSettings_LongRunning) Reset() {
|
||||
*x = MethodSettings_LongRunning{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_api_client_proto_msgTypes[13]
|
||||
mi := &file_google_api_client_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
|
@ -1050,7 +1110,7 @@ func (x *MethodSettings_LongRunning) String() string {
|
|||
func (*MethodSettings_LongRunning) ProtoMessage() {}
|
||||
|
||||
func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_api_client_proto_msgTypes[13]
|
||||
mi := &file_google_api_client_proto_msgTypes[15]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
|
@ -1335,81 +1395,112 @@ var file_google_api_client_proto_rawDesc = []byte{
|
|||
0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
|
||||
0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
|
||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c,
|
||||
0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
||||
0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f,
|
||||
0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74,
|
||||
0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c,
|
||||
0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06,
|
||||
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
|
||||
0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
||||
0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65,
|
||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
|
||||
0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
|
||||
0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74,
|
||||
0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
|
||||
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
|
||||
0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
|
||||
0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69,
|
||||
0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52,
|
||||
0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x94, 0x02, 0x0a,
|
||||
0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12,
|
||||
0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c,
|
||||
0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c,
|
||||
0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65,
|
||||
0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d,
|
||||
0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78,
|
||||
0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61,
|
||||
0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f,
|
||||
0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
|
||||
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65,
|
||||
0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
|
||||
0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41,
|
||||
0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
|
||||
0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a,
|
||||
0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, 0x10,
|
||||
0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a,
|
||||
0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x2a, 0x67,
|
||||
0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44,
|
||||
0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c,
|
||||
0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53,
|
||||
0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
|
||||
0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42,
|
||||
0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41,
|
||||
0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f,
|
||||
0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
|
||||
0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03,
|
||||
0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
|
||||
0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68,
|
||||
0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66,
|
||||
0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74,
|
||||
0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a,
|
||||
0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42,
|
||||
0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
|
||||
0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae,
|
||||
0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
|
||||
0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
|
||||
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
|
||||
0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a,
|
||||
0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69,
|
||||
0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
|
||||
0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
|
||||
0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
|
||||
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
|
||||
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f,
|
||||
0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20,
|
||||
0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
|
||||
0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65,
|
||||
0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
|
||||
0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e,
|
||||
0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
|
||||
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e,
|
||||
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
|
||||
0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
|
||||
0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
|
||||
0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
|
||||
0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47,
|
||||
0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
|
||||
0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
|
||||
0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
|
||||
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
|
||||
0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65,
|
||||
0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65,
|
||||
0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e,
|
||||
0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65,
|
||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
|
||||
0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a,
|
||||
0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
|
||||
0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
|
||||
0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
|
||||
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50,
|
||||
0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c,
|
||||
0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65,
|
||||
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
|
||||
0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e,
|
||||
0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
|
||||
0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a,
|
||||
0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65,
|
||||
0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54,
|
||||
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49,
|
||||
0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49,
|
||||
0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
|
||||
0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41,
|
||||
0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03,
|
||||
0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10,
|
||||
0x04, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
|
||||
0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a,
|
||||
0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f,
|
||||
0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50,
|
||||
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54,
|
||||
0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45,
|
||||
0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65,
|
||||
0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b,
|
||||
0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67,
|
||||
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
|
||||
0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
|
||||
0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f,
|
||||
0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73,
|
||||
0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
|
||||
0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
|
||||
0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -1425,7 +1516,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte {
|
|||
}
|
||||
|
||||
var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
|
||||
var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
|
||||
var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
|
||||
var file_google_api_client_proto_goTypes = []interface{}{
|
||||
(ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
|
||||
(ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
|
||||
|
@ -1442,15 +1533,17 @@ var file_google_api_client_proto_goTypes = []interface{}{
|
|||
(*GoSettings)(nil), // 12: google.api.GoSettings
|
||||
(*MethodSettings)(nil), // 13: google.api.MethodSettings
|
||||
nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
|
||||
(*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning
|
||||
(api.LaunchStage)(0), // 16: google.api.LaunchStage
|
||||
(*durationpb.Duration)(nil), // 17: google.protobuf.Duration
|
||||
(*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions
|
||||
(*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions
nil, // 15: google.api.DotnetSettings.RenamedServicesEntry
nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry
(*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning
(api.LaunchStage)(0), // 18: google.api.LaunchStage
(*durationpb.Duration)(nil), // 19: google.protobuf.Duration
(*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions
(*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
}
var file_google_api_client_proto_depIdxs = []int32{
1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
16, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings

@@ -1469,20 +1562,22 @@ var file_google_api_client_proto_depIdxs = []int32{
2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions
19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions
19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
29, // [29:29] is the sub-list for method output_type
29, // [29:29] is the sub-list for method input_type
29, // [29:29] is the sub-list for extension type_name
26, // [26:29] is the sub-list for extension extendee
0, // [0:26] is the sub-list for field type_name
15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
31, // [31:31] is the sub-list for method output_type
31, // [31:31] is the sub-list for method input_type
31, // [31:31] is the sub-list for extension type_name
28, // [28:31] is the sub-list for extension extendee
0, // [0:28] is the sub-list for field type_name
}

func init() { file_google_api_client_proto_init() }

@@ -1635,7 +1730,7 @@ func file_google_api_client_proto_init() {
return nil
}
}
file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MethodSettings_LongRunning); i {
case 0:
return &v.state

@@ -1654,7 +1749,7 @@ func file_google_api_client_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_client_proto_rawDesc,
NumEnums: 2,
NumMessages: 14,
NumMessages: 16,
NumExtensions: 3,
NumServices: 0,
},
vendor/google.golang.org/grpc/CONTRIBUTING.md (25 changes, generated, vendored)
@@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
both author's & review's time is wasted. Create more PRs to address different
concerns and everyone will be happy.

- If you are searching for features to work on, issues labeled [Status: Help
Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
is a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.

- If you are adding a new file, make sure it has the copyright message template
at the top as a comment. You can copy over the message from an existing file
and update the year.

- The grpc package should only depend on standard Go packages and a small number
of exceptions. If your contribution introduces new dependencies which are NOT
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a

@@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
- Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a github issue if it exists.

- Don't fix code style and formatting unless you are already changing that line
to address an issue. PRs with irrelevant changes won't be merged. If you do
want to fix formatting or style, do that in a separate PR.
- If you want to fix formatting or style, consider whether your changes are an
obvious improvement or might be considered a personal preference. If a style
change is based on preference, it likely will not be accepted. If it corrects
widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.

- Unless your PR is trivial, you should expect there will be reviewer comments
that you'll need to address before merging. We expect you to be reasonably
responsive to those comments, otherwise the PR will be closed after 2-3 weeks
of inactivity.
that you'll need to address before merging. We'll mark it as `Status: Requires
Reporter Clarification` if we expect you to respond to these comments in a
timely manner. If the PR remains inactive for 6 days, it will be marked as
`stale` and automatically close 7 days after that if we don't hear back from
you.

- Maintain **clean commit history** and use **meaningful commit messages**. PRs
with messy commit history are difficult to review and won't be merged. Use
vendor/google.golang.org/grpc/attributes/attributes.go (29 changes, generated, vendored)
@@ -25,6 +25,11 @@
// later release.
package attributes

import (
"fmt"
"strings"
)

// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an

@@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool {
}
return true
}

// String prints the attribute map. If any key or values throughout the map
// implement fmt.Stringer, it calls that method and appends.
func (a *Attributes) String() string {
var sb strings.Builder
sb.WriteString("{")
first := true
for k, v := range a.m {
var key, val string
if str, ok := k.(interface{ String() string }); ok {
key = str.String()
}
if str, ok := v.(interface{ String() string }); ok {
val = str.String()
}
if !first {
sb.WriteString(", ")
}
sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
first = false
}
sb.WriteString("}")
return sb.String()
}
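The new String method makes an *Attributes value printable directly. Below is a minimal, hypothetical sketch (not part of this change) of how it could be exercised; regionKey and regionVal are made-up types, and note that keys or values that do not implement fmt.Stringer are rendered as empty quoted strings by this implementation.

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey and regionVal are hypothetical types. The attributes package asks
// callers to define their own key types, and String() only renders keys and
// values that implement fmt.Stringer.
type regionKey struct{}

func (regionKey) String() string { return "region" }

type regionVal string

func (v regionVal) String() string { return string(v) }

func main() {
	a := attributes.New(regionKey{}, regionVal("us-east-1"))
	// *Attributes now satisfies fmt.Stringer, so it can be handed straight to
	// log/print helpers; this prints something like {"region": "us-east-1", }.
	fmt.Println(a)
}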
vendor/google.golang.org/grpc/balancer/balancer.go (2 changes, generated, vendored)
@@ -286,7 +286,7 @@ type PickResult struct {
//
// LB policies with child policies are responsible for propagating metadata
// injected by their children to the ClientConn, as part of Pick().
Metatada metadata.MD
Metadata metadata.MD
}

// TransientFailureError returns e. It exists for backward compatibility and
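This hunk renames the exported PickResult field from the misspelled Metatada to Metadata. A hedged sketch of a custom picker written against the corrected field name follows; fixedPicker is hypothetical and not part of grpc-go.

package lbexample

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/metadata"
)

// fixedPicker is a hypothetical picker that always returns the same SubConn
// and attaches extra metadata to each RPC via the renamed Metadata field.
type fixedPicker struct {
	sc balancer.SubConn
}

func (p *fixedPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{
		SubConn: p.sc,
		// Custom LB policies that referenced the old Metatada field need this
		// one-line rename to compile against grpc-go 1.56.
		Metadata: metadata.Pairs("x-picked-by", "fixed-picker"),
	}, nil
}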
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go (4 changes, generated, vendored)
@@ -19,8 +19,8 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.14.0
// protoc-gen-go v1.30.0
// protoc v4.22.0
// source: grpc/lb/v1/load_balancer.proto

package grpc_lb_v1
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go (10 changes, generated, vendored)
@@ -19,8 +19,8 @@

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.14.0
// - protoc-gen-go-grpc v1.3.0
// - protoc v4.22.0
// source: grpc/lb/v1/load_balancer.proto

package grpc_lb_v1

@@ -37,6 +37,10 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

const (
LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad"
)

// LoadBalancerClient is the client API for LoadBalancer service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.

@@ -54,7 +58,7 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient {
}

func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...)
if err != nil {
return nil, err
}
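The regenerated stubs export a LoadBalancer_BalanceLoad_FullMethodName constant and use it when opening the stream. Calling code can reuse the same constant instead of hard-coding the method path, for example in a client stream interceptor; the sketch below is illustrative only and logBalanceLoad is a hypothetical name.

package lbexample

import (
	"context"
	"log"

	"google.golang.org/grpc"
	lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1"
)

// logBalanceLoad is a hypothetical grpc.StreamClientInterceptor that matches
// the grpclb BalanceLoad stream against the generated full-method constant
// rather than a hand-written "/grpc.lb.v1.LoadBalancer/BalanceLoad" literal.
func logBalanceLoad(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,
	method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	if method == lbpb.LoadBalancer_BalanceLoad_FullMethodName {
		log.Printf("opening load balancer stream %s", method)
	}
	return streamer(ctx, desc, cc, method, opts...)
}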
vendor/google.golang.org/grpc/balancer_conn_wrappers.go (458 changes, generated, vendored)
|
@ -25,14 +25,20 @@ import (
|
|||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type ccbMode int
|
||||
|
||||
const (
|
||||
ccbModeActive = iota
|
||||
ccbModeIdle
|
||||
ccbModeClosed
|
||||
ccbModeExitingIdle
|
||||
)
|
||||
|
||||
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||||
|
@ -49,137 +55,55 @@ import (
|
|||
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
||||
// switches happen in a graceful manner.
|
||||
type ccBalancerWrapper struct {
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc *ClientConn
|
||||
opts balancer.BuildOptions
|
||||
|
||||
// Since these fields are accessed only from handleXxx() methods which are
|
||||
// synchronized by the watcher goroutine, we do not need a mutex to protect
|
||||
// these fields.
|
||||
// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled in the serializer. Fields
|
||||
// accessed *only* in these serializer callbacks, can therefore be accessed
|
||||
// without a mutex.
|
||||
balancer *gracefulswitch.Balancer
|
||||
curBalancerName string
|
||||
|
||||
updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
|
||||
resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
|
||||
closed *grpcsync.Event // Indicates if close has been called.
|
||||
done *grpcsync.Event // Indicates if close has completed its work.
|
||||
// mu guards access to the below fields. Access to the serializer and its
|
||||
// cancel function needs to be mutex protected because they are overwritten
|
||||
// when the wrapper exits idle mode.
|
||||
mu sync.Mutex
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all outgoing calls.
|
||||
serializerCancel context.CancelFunc // To close the serializer at close/enterIdle time.
|
||||
mode ccbMode // Tracks the current mode of the wrapper.
|
||||
}
|
||||
|
||||
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
|
||||
// is not created until the switchTo() method is invoked.
|
||||
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
updateCh: buffer.NewUnbounded(),
|
||||
resultCh: buffer.NewUnbounded(),
|
||||
closed: grpcsync.NewEvent(),
|
||||
done: grpcsync.NewEvent(),
|
||||
opts: bopts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
|
||||
return ccb
|
||||
}
|
||||
|
||||
// The following xxxUpdate structs wrap the arguments received as part of the
|
||||
// corresponding update. The watcher goroutine uses the 'type' of the update to
|
||||
// invoke the appropriate handler routine to handle the update.
|
||||
|
||||
type ccStateUpdate struct {
|
||||
ccs *balancer.ClientConnState
|
||||
}
|
||||
|
||||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
err error
|
||||
}
|
||||
|
||||
type exitIdleUpdate struct{}
|
||||
|
||||
type resolverErrorUpdate struct {
|
||||
err error
|
||||
}
|
||||
|
||||
type switchToUpdate struct {
|
||||
name string
|
||||
}
|
||||
|
||||
type subConnUpdate struct {
|
||||
acbw *acBalancerWrapper
|
||||
}
|
||||
|
||||
// watcher is a long-running goroutine which reads updates from a channel and
|
||||
// invokes corresponding methods on the underlying balancer. It ensures that
|
||||
// these methods are invoked in a synchronous fashion. It also ensures that
|
||||
// these methods are invoked in the order in which the updates were received.
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case u := <-ccb.updateCh.Get():
|
||||
ccb.updateCh.Load()
|
||||
if ccb.closed.HasFired() {
|
||||
break
|
||||
}
|
||||
switch update := u.(type) {
|
||||
case *ccStateUpdate:
|
||||
ccb.handleClientConnStateChange(update.ccs)
|
||||
case *scStateUpdate:
|
||||
ccb.handleSubConnStateChange(update)
|
||||
case *exitIdleUpdate:
|
||||
ccb.handleExitIdle()
|
||||
case *resolverErrorUpdate:
|
||||
ccb.handleResolverError(update.err)
|
||||
case *switchToUpdate:
|
||||
ccb.handleSwitchTo(update.name)
|
||||
case *subConnUpdate:
|
||||
ccb.handleRemoveSubConn(update.acbw)
|
||||
default:
|
||||
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
|
||||
}
|
||||
case <-ccb.closed.Done():
|
||||
}
|
||||
|
||||
if ccb.closed.HasFired() {
|
||||
ccb.handleClose()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
||||
// the underlying balancer.
|
||||
//
|
||||
// Unlike other methods invoked by grpc to push updates to the underlying
|
||||
// balancer, this method cannot simply push the update onto the update channel
|
||||
// and return. It needs to return the error returned by the underlying balancer
|
||||
// back to grpc which propagates that to the resolver.
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
|
||||
|
||||
var res interface{}
|
||||
select {
|
||||
case res = <-ccb.resultCh.Get():
|
||||
ccb.resultCh.Load()
|
||||
case <-ccb.closed.Done():
|
||||
// Return early if the balancer wrapper is closed while we are waiting for
|
||||
// the underlying balancer to process a ClientConnState update.
|
||||
return nil
|
||||
}
|
||||
// If the returned error is nil, attempting to type assert to error leads to
|
||||
// panic. So, this needs to handled separately.
|
||||
if res == nil {
|
||||
return nil
|
||||
}
|
||||
return res.(error)
|
||||
}
|
||||
|
||||
// handleClientConnStateChange handles a ClientConnState update from the update
|
||||
// channel and invokes the appropriate method on the underlying balancer.
|
||||
//
|
||||
// If the addresses specified in the update contain addresses of type "grpclb"
|
||||
// and the selected LB policy is not "grpclb", these addresses will be filtered
|
||||
// out and ccs will be modified with the updated address list.
|
||||
func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
|
||||
ccb.mu.Lock()
|
||||
errCh := make(chan error, 1)
|
||||
// Here and everywhere else where Schedule() is called, it is done with the
|
||||
// lock held. But the lock guards only the scheduling part. The actual
|
||||
// callback is called asynchronously without the lock being held.
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// If the addresses specified in the update contain addresses of type
|
||||
// "grpclb" and the selected LB policy is not "grpclb", these addresses
|
||||
// will be filtered out and ccs will be modified with the updated
|
||||
// address list.
|
||||
if ccb.curBalancerName != grpclbName {
|
||||
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||
var addrs []resolver.Address
|
||||
for _, addr := range ccs.ResolverState.Addresses {
|
||||
if addr.Type == resolver.GRPCLB {
|
||||
|
@ -189,52 +113,43 @@ func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientCo
|
|||
}
|
||||
ccs.ResolverState.Addresses = addrs
|
||||
}
|
||||
ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
|
||||
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
||||
})
|
||||
if !ok {
|
||||
// If we are unable to schedule a function with the serializer, it
|
||||
// indicates that it has been closed. A serializer is only closed when
|
||||
// the wrapper is closed or is in idle.
|
||||
ccb.mu.Unlock()
|
||||
return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
// We get here only if the above call to Schedule succeeds, in which case it
|
||||
// is guaranteed that the scheduled function will run. Therefore it is safe
|
||||
// to block on this channel.
|
||||
err := <-errCh
|
||||
if logger.V(2) && err != nil {
|
||||
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// updateSubConnState is invoked by grpc to push a subConn state update to the
|
||||
// underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
// don't want the balancer to receive this state change. So before
|
||||
// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
|
||||
// this function will be called with (nil, Shutdown). We don't need to call
|
||||
// balancer method in this case.
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.updateCh.Put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
err: err,
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
||||
})
|
||||
}
|
||||
|
||||
// handleSubConnStateChange handles a SubConnState update from the update
|
||||
// channel and invokes the appropriate method on the underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
|
||||
ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) exitIdle() {
|
||||
ccb.updateCh.Put(&exitIdleUpdate{})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleExitIdle() {
|
||||
if ccb.cc.GetState() != connectivity.Idle {
|
||||
return
|
||||
}
|
||||
ccb.balancer.ExitIdle()
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
ccb.updateCh.Put(&resolverErrorUpdate{err: err})
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
ccb.balancer.ResolverError(err)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
|
||||
|
@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) {
|
|||
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
|
||||
// the graceful balancer switching process if the name does not change.
|
||||
func (ccb *ccBalancerWrapper) switchTo(name string) {
|
||||
ccb.updateCh.Put(&switchToUpdate{name: name})
|
||||
}
|
||||
|
||||
// handleSwitchTo handles a balancer switch update from the update channel. It
|
||||
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
|
||||
// balancer.Builder corresponding to name. If no balancer.Builder is registered
|
||||
// for the given name, it uses the default LB policy which is "pick_first".
|
||||
func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
||||
// TODO: Other languages use case-insensitive balancer registries. We should
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// TODO: Other languages use case-sensitive balancer registries. We should
|
||||
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
||||
if strings.EqualFold(ccb.curBalancerName, name) {
|
||||
return
|
||||
}
|
||||
ccb.buildLoadBalancingPolicy(name)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// TODO: Ensure that name is a registered LB policy when we get here.
|
||||
// We currently only validate the `loadBalancingConfig` field. We need to do
|
||||
// the same for the `loadBalancingPolicy` field and reject the service config
|
||||
// if the specified policy is not registered.
|
||||
// buildLoadBalancingPolicy performs the following:
|
||||
// - retrieve a balancer builder for the given name. Use the default LB
|
||||
// policy, pick_first, if no LB policy with name is found in the registry.
|
||||
// - instruct the gracefulswitch balancer to switch to the above builder. This
|
||||
// will actually build the new balancer.
|
||||
// - update the `curBalancerName` field
|
||||
//
|
||||
// Must be called from a serializer callback.
|
||||
func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
|
||||
|
@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
|
|||
ccb.curBalancerName = builder.Name()
|
||||
}
|
||||
|
||||
// handleRemoveSucConn handles a request from the underlying balancer to remove
|
||||
// a subConn.
|
||||
//
|
||||
// See comments in RemoveSubConn() for more details.
|
||||
func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
ccb.closed.Fire()
|
||||
<-ccb.done.Done()
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
|
||||
ccb.closeBalancer(ccbModeClosed)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleClose() {
|
||||
ccb.balancer.Close()
|
||||
ccb.done.Fire()
|
||||
// enterIdleMode is invoked by grpc when the channel enters idle mode upon
|
||||
// expiry of idle_timeout. This call blocks until the balancer is closed.
|
||||
func (ccb *ccBalancerWrapper) enterIdleMode() {
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
|
||||
ccb.closeBalancer(ccbModeIdle)
|
||||
}
|
||||
|
||||
// closeBalancer is invoked when the channel is being closed or when it enters
|
||||
// idle mode upon expiry of idle_timeout.
|
||||
func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
ccb.mode = m
|
||||
done := ccb.serializer.Done
|
||||
b := ccb.balancer
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// Close the serializer to ensure that no more calls from gRPC are sent
|
||||
// to the balancer.
|
||||
ccb.serializerCancel()
|
||||
// Empty the current balancer name because we don't have a balancer
|
||||
// anymore and also so that we act on the next call to switchTo by
|
||||
// creating a new balancer specified by the new resolver.
|
||||
ccb.curBalancerName = ""
|
||||
})
|
||||
if !ok {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish.
|
||||
<-done
|
||||
// Spawn a goroutine to close the balancer (since it may block trying to
|
||||
// cleanup all allocated resources) and return early.
|
||||
go b.Close()
|
||||
}
|
||||
|
||||
// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
||||
// because of an RPC or because of an invocation of the Connect() API. This
|
||||
// recreates the balancer that was closed previously when entering idle mode.
|
||||
//
|
||||
// If the channel is not in idle mode, we know for a fact that we are here as a
|
||||
// result of the user calling the Connect() method on the ClientConn. In this
|
||||
// case, we can simply forward the call to the underlying balancer, instructing
|
||||
// it to reconnect to the backends.
|
||||
func (ccb *ccBalancerWrapper) exitIdleMode() {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed {
|
||||
// Request to exit idle is a no-op when wrapper is already closed.
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if ccb.mode == ccbModeIdle {
|
||||
// Recreate the serializer which was closed when we entered idle.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
|
||||
ccb.serializerCancel = cancel
|
||||
}
|
||||
|
||||
// The ClientConn guarantees that mutual exclusion between close() and
|
||||
// exitIdleMode(), and since we just created a new serializer, we can be
|
||||
// sure that the below function will be scheduled.
|
||||
done := make(chan struct{})
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
defer close(done)
|
||||
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
|
||||
if ccb.mode != ccbModeIdle {
|
||||
ccb.balancer.ExitIdle()
|
||||
return
|
||||
}
|
||||
|
||||
// Gracefulswitch balancer does not support a switchTo operation after
|
||||
// being closed. Hence we need to create a new one here.
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
|
||||
ccb.mode = ccbModeActive
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
|
||||
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
if len(addrs) <= 0 {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
}
|
||||
ac, err := ccb.cc.newAddrConn(addrs, opts)
|
||||
|
@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
|||
return nil, err
|
||||
}
|
||||
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
|
||||
acbw.ac.mu.Lock()
|
||||
ac.acbw = acbw
|
||||
acbw.ac.mu.Unlock()
|
||||
return acbw, nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
|
||||
// was required to handle the RemoveSubConn() method asynchronously by pushing
|
||||
// the update onto the update channel. This was done to avoid a deadlock as
|
||||
// switchBalancer() was holding cc.mu when calling Close() on the old
|
||||
// balancer, which would in turn call RemoveSubConn().
|
||||
if ccb.isIdleOrClosed() {
|
||||
// It is safe to ignore this call when the balancer is closed or in idle
|
||||
// because the ClientConn takes care of closing the connections.
|
||||
//
|
||||
// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
|
||||
// asynchronously is probably not required anymore since the switchTo() method
|
||||
// handles the balancer switch by pushing the update onto the channel.
|
||||
// TODO(easwars): Handle this inline.
|
||||
// Not returning early from here when the balancer is closed or in idle
|
||||
// leads to a deadlock though, because of the following sequence of
|
||||
// calls when holding cc.mu:
|
||||
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
|
||||
// ccb.RemoveAddrConn --> cc.removeAddrConn
|
||||
return
|
||||
}
|
||||
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
|
||||
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
|
@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
|
|||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
|
@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
|||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
|
@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string {
|
|||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||
// It implements balancer.SubConn interface.
|
||||
type acBalancerWrapper struct {
|
||||
ac *addrConn // read-only
|
||||
|
||||
mu sync.Mutex
|
||||
ac *addrConn
|
||||
producers map[balancer.ProducerBuilder]*refCountedProducer
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) String() string {
|
||||
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
cc := acbw.ac.cc
|
||||
opts := acbw.ac.scopts
|
||||
acbw.ac.mu.Lock()
|
||||
// Set old ac.acbw to nil so the Shutdown state update will be ignored
|
||||
// by balancer.
|
||||
//
|
||||
// TODO(bar) the state transition could be wrong when tearDown() old ac
|
||||
// and creating new ac, fix the transition.
|
||||
acbw.ac.acbw = nil
|
||||
acbw.ac.mu.Unlock()
|
||||
acState := acbw.ac.getState()
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
|
||||
if acState == connectivity.Shutdown {
|
||||
return
|
||||
}
|
||||
|
||||
newAC, err := cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = newAC
|
||||
newAC.mu.Lock()
|
||||
newAC.acbw = acbw
|
||||
newAC.mu.Unlock()
|
||||
if acState != connectivity.Idle {
|
||||
go newAC.connect()
|
||||
}
|
||||
}
|
||||
acbw.ac.updateAddrs(addrs)
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) Connect() {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
go acbw.ac.connect()
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
return acbw.ac
|
||||
}
|
||||
|
||||
var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
|
||||
|
||||
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
|
||||
// ready, returns errSubConnNotReady.
|
||||
// ready, blocks until it is or ctx expires. Returns an error when the context
|
||||
// expires or the addrConn is shut down.
|
||||
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||
transport := acbw.ac.getReadyTransport()
|
||||
if transport == nil {
|
||||
return nil, errSubConnNotReady
|
||||
transport, err := acbw.ac.getTransport(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
|
||||
}
|
||||
|
|
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go (4 changes, generated, vendored)
@@ -18,8 +18,8 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.14.0
// protoc-gen-go v1.30.0
// protoc v4.22.0
// source: grpc/binlog/v1/binarylog.proto

package grpc_binarylog_v1
vendor/google.golang.org/grpc/call.go (5 changes, generated, vendored)
@@ -27,6 +27,11 @@ import (
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
if err := cc.idlenessMgr.onCallBegin(); err != nil {
return err
}
defer cc.idlenessMgr.onCallEnd()

// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
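Invoke now brackets each RPC with onCallBegin/onCallEnd on the idleness manager, so the channel counts as active only while calls are in flight. Those hooks are internal; applications reach the feature through a dial option. The sketch below assumes the experimental WithIdleTimeout option that ships with this idleness machinery; treat the option name and its default as assumptions to verify against the grpc-go 1.56 release notes.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumption: WithIdleTimeout is the experimental dial option that enables
	// channel idleness; a value of 0 keeps idleness disabled.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	// With no RPCs for 5 minutes the channel drops its resolver, balancer and
	// subchannels; the next Invoke or NewStream transparently revives them.
}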
vendor/google.golang.org/grpc/clientconn.go (597 changes, generated, vendored)
|
@ -24,7 +24,6 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
@ -69,6 +68,9 @@ var (
|
|||
errConnDrain = errors.New("grpc: the connection is drained")
|
||||
// errConnClosing indicates that the connection is closing.
|
||||
errConnClosing = errors.New("grpc: the connection is closing")
|
||||
// errConnIdling indicates that the connection is being closed as the channel
|
||||
// is moving to an idle mode due to inactivity.
|
||||
errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
|
||||
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
|
||||
// service config.
|
||||
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
|
||||
|
@ -138,17 +140,39 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
csMgr: &connectivityStateManager{},
|
||||
conns: make(map[*addrConn]struct{}),
|
||||
dopts: defaultDialOptions(),
|
||||
blockingpicker: newPickerWrapper(),
|
||||
czData: new(channelzData),
|
||||
firstResolveEvent: grpcsync.NewEvent(),
|
||||
}
|
||||
|
||||
// We start the channel off in idle mode, but kick it out of idle at the end
|
||||
// of this method, instead of waiting for the first RPC. Other gRPC
|
||||
// implementations do wait for the first RPC to kick the channel out of
|
||||
// idle. But doing so would be a major behavior change for our users who are
|
||||
// used to seeing the channel active after Dial.
|
||||
//
|
||||
// Taking this approach of kicking it out of idle at the end of this method
|
||||
// allows us to share the code between channel creation and exiting idle
|
||||
// mode. This will also make it easy for us to switch to starting the
|
||||
// channel off in idle, if at all we ever get to do that.
|
||||
cc.idlenessState = ccIdlenessStateIdle
|
||||
|
||||
cc.retryThrottler.Store((*retryThrottler)(nil))
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
|
||||
cc.ctx, cc.cancel = context.WithCancel(context.Background())
|
||||
cc.exitIdleCond = sync.NewCond(&cc.mu)
|
||||
|
||||
for _, opt := range extraDialOptions {
|
||||
disableGlobalOpts := false
|
||||
for _, opt := range opts {
|
||||
if _, ok := opt.(*disableGlobalDialOptions); ok {
|
||||
disableGlobalOpts = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !disableGlobalOpts {
|
||||
for _, opt := range globalDialOptions {
|
||||
opt.apply(&cc.dopts)
|
||||
}
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt.apply(&cc.dopts)
|
||||
|
@ -163,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
}()
|
||||
|
||||
pid := cc.dopts.channelzParentID
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
|
||||
ted := &channelz.TraceEventDesc{
|
||||
Desc: "Channel created",
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
if cc.dopts.channelzParentID != nil {
|
||||
ted.Parent = &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
}
|
||||
channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
|
||||
cc.csMgr.channelzID = cc.channelzID
|
||||
// Register ClientConn with channelz.
|
||||
cc.channelzRegistration(target)
|
||||
|
||||
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
|
||||
return nil, errNoTransportSecurity
|
||||
}
|
||||
if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
|
||||
return nil, errTransportCredsAndBundle
|
||||
}
|
||||
if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
|
||||
return nil, errNoTransportCredsInBundle
|
||||
}
|
||||
transportCreds := cc.dopts.copts.TransportCredentials
|
||||
if transportCreds == nil {
|
||||
transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
|
||||
}
|
||||
if transportCreds.Info().SecurityProtocol == "insecure" {
|
||||
for _, cd := range cc.dopts.copts.PerRPCCredentials {
|
||||
if cd.RequireTransportSecurity() {
|
||||
return nil, errTransportCredentialsMissing
|
||||
}
|
||||
}
|
||||
if err := cc.validateTransportCredentials(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cc.dopts.defaultServiceConfigRawJSON != nil {
|
||||
|
@ -234,35 +229,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
}()
|
||||
|
||||
scSet := false
|
||||
if cc.dopts.scChan != nil {
|
||||
// Try to get an initial service config.
|
||||
select {
|
||||
case sc, ok := <-cc.dopts.scChan:
|
||||
if ok {
|
||||
cc.sc = &sc
|
||||
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
|
||||
scSet = true
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = backoff.DefaultExponential
|
||||
}
|
||||
|
||||
// Determine the resolver to use.
|
||||
resolverBuilder, err := cc.parseTargetAndFindResolver()
|
||||
if err != nil {
|
||||
if err := cc.parseTargetAndFindResolver(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
|
||||
if err != nil {
|
||||
if err = cc.determineAuthority(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
|
||||
|
||||
if cc.dopts.scChan != nil && !scSet {
|
||||
if cc.dopts.scChan != nil {
|
||||
// Blocking wait for the initial service config.
|
||||
select {
|
||||
case sc, ok := <-cc.dopts.scChan:
|
||||
|
@ -278,36 +257,29 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
go cc.scWatcher()
|
||||
}
|
||||
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
// This creates the name resolver, load balancer, blocking picker etc.
|
||||
if err := cc.exitIdleMode(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
Authority: cc.authority,
|
||||
CustomUserAgent: cc.dopts.copts.UserAgent,
|
||||
ChannelzParentID: cc.channelzID,
|
||||
Target: cc.parsedTarget,
|
||||
})
|
||||
|
||||
// Build the resolver.
|
||||
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build resolver: %v", err)
|
||||
// Configure idleness support with configured idle timeout or default idle
|
||||
// timeout duration. Idleness can be explicitly disabled by the user, by
|
||||
// setting the dial option to 0.
|
||||
cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout)
|
||||
|
||||
// Return early for non-blocking dials.
|
||||
if !cc.dopts.block {
|
||||
return cc, nil
|
||||
}
|
||||
cc.mu.Lock()
|
||||
cc.resolverWrapper = rWrapper
|
||||
cc.mu.Unlock()
|
||||
|
||||
// A blocking dial blocks until the clientConn is ready.
|
||||
if cc.dopts.block {
|
||||
for {
|
||||
cc.Connect()
|
||||
s := cc.GetState()
|
||||
if s == connectivity.Idle {
|
||||
cc.Connect()
|
||||
}
|
||||
if s == connectivity.Ready {
|
||||
break
|
||||
return cc, nil
|
||||
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
||||
if err = cc.connectionError(); err != nil {
|
||||
terr, ok := err.(interface {
|
||||
|
@ -328,7 +300,181 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
|||
}
|
||||
}
|
||||
|
||||
return cc, nil
|
||||
// addTraceEvent is a helper method to add a trace event on the channel. If the
|
||||
// channel is a nested one, the same event is also added on the parent channel.
|
||||
func (cc *ClientConn) addTraceEvent(msg string) {
|
||||
ted := &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Channel %s", msg),
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
if cc.dopts.channelzParentID != nil {
|
||||
ted.Parent = &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
}
|
||||
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
|
||||
}
|
||||
|
||||
// exitIdleMode moves the channel out of idle mode by recreating the name
|
||||
// resolver and load balancer.
|
||||
func (cc *ClientConn) exitIdleMode() error {
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return errConnClosing
|
||||
}
|
||||
if cc.idlenessState != ccIdlenessStateIdle {
|
||||
cc.mu.Unlock()
|
||||
logger.Info("ClientConn asked to exit idle mode when not in idle mode")
|
||||
return nil
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// When Close() and exitIdleMode() race against each other, one of the
|
||||
// following two can happen:
|
||||
// - Close() wins the race and runs first. exitIdleMode() runs after, and
|
||||
// sees that the ClientConn is already closed and hence returns early.
|
||||
// - exitIdleMode() wins the race and runs first and recreates the balancer
|
||||
// and releases the lock before recreating the resolver. If Close() runs
|
||||
// in this window, it will wait for exitIdleMode to complete.
|
||||
//
|
||||
// We achieve this synchronization using the below condition variable.
|
||||
cc.mu.Lock()
|
||||
cc.idlenessState = ccIdlenessStateActive
|
||||
cc.exitIdleCond.Signal()
|
||||
cc.mu.Unlock()
|
||||
}()
|
||||
|
||||
cc.idlenessState = ccIdlenessStateExitingIdle
|
||||
exitedIdle := false
|
||||
if cc.blockingpicker == nil {
|
||||
cc.blockingpicker = newPickerWrapper()
|
||||
} else {
|
||||
cc.blockingpicker.exitIdleMode()
|
||||
exitedIdle = true
|
||||
}
|
||||
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
}
|
||||
if cc.balancerWrapper == nil {
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
Authority: cc.authority,
|
||||
CustomUserAgent: cc.dopts.copts.UserAgent,
|
||||
ChannelzParentID: cc.channelzID,
|
||||
Target: cc.parsedTarget,
|
||||
})
|
||||
} else {
|
||||
cc.balancerWrapper.exitIdleMode()
|
||||
}
|
||||
cc.firstResolveEvent = grpcsync.NewEvent()
|
||||
cc.mu.Unlock()
|
||||
|
||||
// This needs to be called without cc.mu because this builds a new resolver
|
||||
// which might update state or report error inline which needs to be handled
|
||||
// by cc.updateResolverState() which also grabs cc.mu.
|
||||
if err := cc.initResolverWrapper(credsClone); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exitedIdle {
|
||||
cc.addTraceEvent("exiting idle mode")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
|
||||
// name resolver, load balancer and any subchannels.
|
||||
func (cc *ClientConn) enterIdleMode() error {
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
if cc.idlenessState != ccIdlenessStateActive {
|
||||
logger.Error("ClientConn asked to enter idle mode when not active")
|
||||
return nil
|
||||
}
|
||||
|
||||
// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
|
||||
// of setting it to nil here, we recreate the map. This also means that we
|
||||
// don't have to do this when exiting idle mode.
|
||||
conns := cc.conns
|
||||
cc.conns = make(map[*addrConn]struct{})
|
||||
|
||||
// TODO: Currently, we close the resolver wrapper upon entering idle mode
|
||||
// and create a new one upon exiting idle mode. This means that the
|
||||
// `cc.resolverWrapper` field would be overwritten every time we exit idle
|
||||
// mode. While this means that we need to hold `cc.mu` when accessing
|
||||
// `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
|
||||
// try to do the same for the balancer and picker wrappers too.
|
||||
cc.resolverWrapper.close()
|
||||
cc.blockingpicker.enterIdleMode()
|
||||
cc.balancerWrapper.enterIdleMode()
|
||||
cc.csMgr.updateState(connectivity.Idle)
|
||||
cc.idlenessState = ccIdlenessStateIdle
|
||||
cc.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
cc.addTraceEvent("entering idle mode")
|
||||
for ac := range conns {
|
||||
ac.tearDown(errConnIdling)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateTransportCredentials performs a series of checks on the configured
|
||||
// transport credentials. It returns a non-nil error if any of these conditions
|
||||
// are met:
|
||||
// - no transport creds and no creds bundle is configured
|
||||
// - both transport creds and creds bundle are configured
|
||||
// - creds bundle is configured, but it lacks a transport credentials
|
||||
// - insecure transport creds configured alongside call creds that require
|
||||
// transport level security
|
||||
//
|
||||
// If none of the above conditions are met, the configured credentials are
|
||||
// deemed valid and a nil error is returned.
|
||||
func (cc *ClientConn) validateTransportCredentials() error {
|
||||
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
|
||||
return errNoTransportSecurity
|
||||
}
|
||||
if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
|
||||
return errTransportCredsAndBundle
|
||||
}
|
||||
if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
|
||||
return errNoTransportCredsInBundle
|
||||
}
|
||||
transportCreds := cc.dopts.copts.TransportCredentials
|
||||
if transportCreds == nil {
|
||||
transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
|
||||
}
|
||||
if transportCreds.Info().SecurityProtocol == "insecure" {
|
||||
for _, cd := range cc.dopts.copts.PerRPCCredentials {
|
||||
if cd.RequireTransportSecurity() {
|
||||
return errTransportCredentialsMissing
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// channelzRegistration registers the newly created ClientConn with channelz and
|
||||
// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
|
||||
// A channelz trace event is emitted for ClientConn creation. If the newly
|
||||
// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
|
||||
// specified via a dial option, the trace event is also added to the parent.
|
||||
//
|
||||
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
||||
func (cc *ClientConn) channelzRegistration(target string) {
|
||||
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
|
||||
cc.addTraceEvent("created")
|
||||
cc.csMgr.channelzID = cc.channelzID
|
||||
}
|
||||
|
||||
// chainUnaryClientInterceptors chains all unary client interceptors into one.
|
||||
|
@ -474,7 +620,9 @@ type ClientConn struct {
|
|||
authority string // See determineAuthority().
|
||||
dopts dialOptions // Default and user specified dial options.
|
||||
channelzID *channelz.Identifier // Channelz identifier for the channel.
|
||||
resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
|
||||
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
|
||||
idlenessMgr idlenessManager
|
||||
|
||||
// The following provide their own synchronization, and therefore don't
|
||||
// require cc.mu to be held to access them.
|
||||
|
@ -495,11 +643,31 @@ type ClientConn struct {
|
|||
sc *ServiceConfig // Latest service config received from the resolver.
|
||||
conns map[*addrConn]struct{} // Set to nil on close.
|
||||
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
|
||||
idlenessState ccIdlenessState // Tracks idleness state of the channel.
|
||||
exitIdleCond *sync.Cond // Signalled when channel exits idle.
|
||||
|
||||
lceMu sync.Mutex // protects lastConnectionError
|
||||
lastConnectionError error
|
||||
}
|
||||
|
||||
// ccIdlenessState tracks the idleness state of the channel.
|
||||
//
|
||||
// Channels start off in `active` and move to `idle` after a period of
|
||||
// inactivity. When moving back to `active` upon an incoming RPC, they
|
||||
// transition through `exiting_idle`. This state is useful for synchronization
|
||||
// with Close().
|
||||
//
|
||||
// This state tracking is mostly for self-protection. The idlenessManager is
|
||||
// expected to keep track of the state as well, and is expected not to call into
|
||||
// the ClientConn unnecessarily.
|
||||
type ccIdlenessState int8
|
||||
|
||||
const (
|
||||
ccIdlenessStateActive ccIdlenessState = iota
|
||||
ccIdlenessStateIdle
|
||||
ccIdlenessStateExitingIdle
|
||||
)
|
||||
|
||||
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
||||
// ctx expires. A true value is returned in former case and false in latter.
|
||||
//
|
||||
|
@ -539,7 +707,10 @@ func (cc *ClientConn) GetState() connectivity.State {
|
|||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
|
||||
// release.
|
||||
func (cc *ClientConn) Connect() {
|
||||
cc.balancerWrapper.exitIdle()
|
||||
cc.exitIdleMode()
|
||||
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
|
||||
// LB policy so that connections can be created.
|
||||
cc.balancerWrapper.exitIdleMode()
|
||||
}
|
||||
|
||||
func (cc *ClientConn) scWatcher() {
|
||||
|
@ -708,6 +879,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
|
|||
dopts: cc.dopts,
|
||||
czData: new(channelzData),
|
||||
resetBackoff: make(chan struct{}),
|
||||
stateChan: make(chan struct{}),
|
||||
}
|
||||
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
|
||||
// Track ac in cc. This needs to be done before any getTransport(...) is called.
|
||||
|
@ -801,9 +973,6 @@ func (ac *addrConn) connect() error {
|
|||
ac.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
// Update connectivity state within the lock to prevent subsequent or
|
||||
// concurrent calls from resetting the transport more than once.
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.mu.Unlock()
|
||||
|
||||
ac.resetTransport()
|
||||
|
@ -822,58 +991,62 @@ func equalAddresses(a, b []resolver.Address) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
|
||||
//
|
||||
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
|
||||
// addresses will be picked up by retry in the next iteration after backoff.
|
||||
//
|
||||
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
|
||||
//
|
||||
// If the addresses is the same as the old list, it does nothing and returns
|
||||
// true.
|
||||
//
|
||||
// If ac is Connecting, it returns false. The caller should tear down the ac and
|
||||
// create a new one. Note that the backoff will be reset when this happens.
|
||||
//
|
||||
// If ac is Ready, it checks whether current connected address of ac is in the
|
||||
// new addrs list.
|
||||
// - If true, it updates ac.addrs and returns true. The ac will keep using
|
||||
// the existing connection.
|
||||
// - If false, it does nothing and returns false.
|
||||
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
||||
// updateAddrs updates ac.addrs with the new addresses list and handles active
|
||||
// connections or connection attempts.
|
||||
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
|
||||
|
||||
if equalAddresses(ac.addrs, addrs) {
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
ac.addrs = addrs
|
||||
|
||||
if ac.state == connectivity.Shutdown ||
|
||||
ac.state == connectivity.TransientFailure ||
|
||||
ac.state == connectivity.Idle {
|
||||
ac.addrs = addrs
|
||||
return true
|
||||
// We were not connecting, so do nothing but update the addresses.
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if equalAddresses(ac.addrs, addrs) {
|
||||
return true
|
||||
}
|
||||
|
||||
if ac.state == connectivity.Connecting {
|
||||
return false
|
||||
}
|
||||
|
||||
// ac.state is Ready, try to find the connected address.
|
||||
var curAddrFound bool
|
||||
if ac.state == connectivity.Ready {
|
||||
// Try to find the connected address.
|
||||
for _, a := range addrs {
|
||||
a.ServerName = ac.cc.getServerName(a)
|
||||
if reflect.DeepEqual(ac.curAddr, a) {
|
||||
curAddrFound = true
|
||||
break
|
||||
if a.Equal(ac.curAddr) {
|
||||
// We are connected to a valid address, so do nothing but
|
||||
// update the addresses.
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
|
||||
if curAddrFound {
|
||||
ac.addrs = addrs
|
||||
}
|
||||
|
||||
return curAddrFound
|
||||
// We are either connected to the wrong address or currently connecting.
|
||||
// Stop the current iteration and restart.
|
||||
|
||||
ac.cancel()
|
||||
ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
|
||||
|
||||
// We have to defer here because GracefulClose => Close => onClose, which
|
||||
// requires locking ac.mu.
|
||||
if ac.transport != nil {
|
||||
defer ac.transport.GracefulClose()
|
||||
ac.transport = nil
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
ac.updateConnectivityState(connectivity.Idle, nil)
|
||||
}
|
||||
|
||||
ac.mu.Unlock()
|
||||
|
||||
// Since we were connecting/connected, we should start a new connection
|
||||
// attempt.
|
||||
go ac.resetTransport()
|
||||
}
|
||||
|
||||
// getServerName determines the serverName to be used in the connection
|
||||
|
@ -1026,39 +1199,40 @@ func (cc *ClientConn) Close() error {
|
|||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
|
||||
for cc.idlenessState == ccIdlenessStateExitingIdle {
|
||||
cc.exitIdleCond.Wait()
|
||||
}
|
||||
|
||||
conns := cc.conns
|
||||
cc.conns = nil
|
||||
cc.csMgr.updateState(connectivity.Shutdown)
|
||||
|
||||
pWrapper := cc.blockingpicker
|
||||
rWrapper := cc.resolverWrapper
|
||||
cc.resolverWrapper = nil
|
||||
bWrapper := cc.balancerWrapper
|
||||
idlenessMgr := cc.idlenessMgr
|
||||
cc.mu.Unlock()
|
||||
|
||||
// The order of closing matters here since the balancer wrapper assumes the
|
||||
// picker is closed before it is closed.
|
||||
cc.blockingpicker.close()
|
||||
if pWrapper != nil {
|
||||
pWrapper.close()
|
||||
}
|
||||
if bWrapper != nil {
|
||||
bWrapper.close()
|
||||
}
|
||||
if rWrapper != nil {
|
||||
rWrapper.close()
|
||||
}
|
||||
if idlenessMgr != nil {
|
||||
idlenessMgr.close()
|
||||
}
|
||||
|
||||
for ac := range conns {
|
||||
ac.tearDown(ErrClientConnClosing)
|
||||
}
|
||||
ted := &channelz.TraceEventDesc{
|
||||
Desc: "Channel deleted",
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
if cc.dopts.channelzParentID != nil {
|
||||
ted.Parent = &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
|
||||
Severity: channelz.CtInfo,
|
||||
}
|
||||
}
|
||||
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
|
||||
cc.addTraceEvent("deleted")
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
|
||||
// trace reference to the entity being deleted, and thus prevent it from being
|
||||
// deleted right away.
|
||||
|
@ -1089,6 +1263,7 @@ type addrConn struct {
|
|||
|
||||
// Use updateConnectivityState for updating addrConn's connectivity state.
|
||||
state connectivity.State
|
||||
stateChan chan struct{} // closed and recreated on every state change.
|
||||
|
||||
backoffIdx int // Needs to be stateful for resetConnectBackoff.
|
||||
resetBackoff chan struct{}
|
||||
|
@ -1102,8 +1277,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
|
|||
if ac.state == s {
|
||||
return
|
||||
}
|
||||
// When changing states, reset the state change channel.
|
||||
close(ac.stateChan)
|
||||
ac.stateChan = make(chan struct{})
|
||||
ac.state = s
|
||||
if lastErr == nil {
|
||||
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
|
||||
} else {
|
||||
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
|
||||
}
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
|
||||
}
|
||||
|
||||
|
@ -1123,7 +1305,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
|
|||
|
||||
func (ac *addrConn) resetTransport() {
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
acCtx := ac.ctx
|
||||
if acCtx.Err() != nil {
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
@ -1151,15 +1334,14 @@ func (ac *addrConn) resetTransport() {
|
|||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.mu.Unlock()
|
||||
|
||||
if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
|
||||
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
|
||||
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
||||
// After exhausting all addresses, the addrConn enters
|
||||
// TRANSIENT_FAILURE.
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
ac.mu.Unlock()
|
||||
if acCtx.Err() != nil {
|
||||
return
|
||||
}
|
||||
ac.mu.Lock()
|
||||
ac.updateConnectivityState(connectivity.TransientFailure, err)
|
||||
|
||||
// Backoff.
|
||||
|
@ -1174,13 +1356,13 @@ func (ac *addrConn) resetTransport() {
|
|||
ac.mu.Unlock()
|
||||
case <-b:
|
||||
timer.Stop()
|
||||
case <-ac.ctx.Done():
|
||||
case <-acCtx.Done():
|
||||
timer.Stop()
|
||||
return
|
||||
}
|
||||
|
||||
ac.mu.Lock()
|
||||
if ac.state != connectivity.Shutdown {
|
||||
if acCtx.Err() == nil {
|
||||
ac.updateConnectivityState(connectivity.Idle, err)
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
|
@ -1195,14 +1377,13 @@ func (ac *addrConn) resetTransport() {
|
|||
// tryAllAddrs tries to create a connection to the addresses, and stops at
|
||||
// the first successful one. It returns an error if no address was successfully
|
||||
// connected, or updates ac appropriately with the new transport.
|
||||
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
|
||||
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
|
||||
var firstConnErr error
|
||||
for _, addr := range addrs {
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
ac.mu.Unlock()
|
||||
if ctx.Err() != nil {
|
||||
return errConnClosing
|
||||
}
|
||||
ac.mu.Lock()
|
||||
|
||||
ac.cc.mu.RLock()
|
||||
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
|
||||
|
@ -1216,7 +1397,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|||
|
||||
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
|
||||
|
||||
err := ac.createTransport(addr, copts, connectDeadline)
|
||||
err := ac.createTransport(ctx, addr, copts, connectDeadline)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -1233,19 +1414,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
|||
// createTransport creates a connection to addr. It returns an error if the
|
||||
// address was not successfully connected, or updates ac appropriately with the
|
||||
// new transport.
|
||||
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
|
||||
func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
|
||||
addr.ServerName = ac.cc.getServerName(addr)
|
||||
hctx, hcancel := context.WithCancel(ac.ctx)
|
||||
hctx, hcancel := context.WithCancel(ctx)
|
||||
|
||||
onClose := func(r transport.GoAwayReason) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
// adjust params based on GoAwayReason
|
||||
ac.adjustParams(r)
|
||||
if ac.state == connectivity.Shutdown {
|
||||
// Already shut down. tearDown() already cleared the transport and
|
||||
// canceled hctx via ac.ctx, and we expected this connection to be
|
||||
// closed, so do nothing here.
|
||||
if ctx.Err() != nil {
|
||||
// Already shut down or connection attempt canceled. tearDown() or
|
||||
// updateAddrs() already cleared the transport and canceled hctx
|
||||
// via ac.ctx, and we expected this connection to be closed, so do
|
||||
// nothing here.
|
||||
return
|
||||
}
|
||||
hcancel()
|
||||
|
@ -1264,7 +1446,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
ac.updateConnectivityState(connectivity.Idle, nil)
|
||||
}
|
||||
|
||||
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
|
||||
connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
|
||||
defer cancel()
|
||||
copts.ChannelzParentID = ac.channelzID
|
||||
|
||||
|
@ -1281,7 +1463,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
if ctx.Err() != nil {
|
||||
// This can happen if the subConn was removed while in `Connecting`
|
||||
// state. tearDown() would have set the state to `Shutdown`, but
|
||||
// would not have closed the transport since ac.transport would not
|
||||
|
@ -1293,6 +1475,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
|||
// The error we pass to Close() is immaterial since there are no open
|
||||
// streams at this point, so no trailers with error details will be sent
|
||||
// out. We just need to pass a non-nil error.
|
||||
//
|
||||
// This can also happen when updateAddrs is called during a connection
|
||||
// attempt.
|
||||
go newTr.Close(transport.ErrConnClosing)
|
||||
return nil
|
||||
}
|
||||
|
@ -1400,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
|
|||
return nil
|
||||
}
|
||||
|
||||
// getTransport waits until the addrconn is ready and returns the transport.
|
||||
// If the context expires first, returns an appropriate status. If the
|
||||
// addrConn is stopped first, returns an Unavailable status error.
|
||||
func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
|
||||
for ctx.Err() == nil {
|
||||
ac.mu.Lock()
|
||||
t, state, sc := ac.transport, ac.state, ac.stateChan
|
||||
ac.mu.Unlock()
|
||||
if state == connectivity.Ready {
|
||||
return t, nil
|
||||
}
|
||||
if state == connectivity.Shutdown {
|
||||
return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-sc:
|
||||
}
|
||||
}
|
||||
return nil, status.FromContextError(ctx.Err()).Err()
|
||||
}
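getTransport above waits on ac.stateChan, which updateConnectivityState closes and recreates on every state change. That close-and-recreate notification pattern is worth calling out on its own; the following is a minimal, self-contained sketch of it, with illustrative names rather than grpc's types:

package main

import (
	"context"
	"fmt"
	"sync"
)

// watcher illustrates the close-and-recreate channel pattern: every state
// change closes the current channel and installs a fresh one, so waiters
// simply re-check the state after each wakeup.
type watcher struct {
	mu      sync.Mutex
	state   string
	changed chan struct{} // closed and recreated on every state change
}

func newWatcher() *watcher {
	return &watcher{state: "IDLE", changed: make(chan struct{})}
}

func (w *watcher) set(s string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.state == s {
		return
	}
	w.state = s
	close(w.changed)                // wake up all current waiters
	w.changed = make(chan struct{}) // future waiters block on the new channel
}

// await blocks until the watcher reaches want or ctx expires.
func (w *watcher) await(ctx context.Context, want string) error {
	for {
		w.mu.Lock()
		s, ch := w.state, w.changed
		w.mu.Unlock()
		if s == want {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ch:
		}
	}
}

func main() {
	w := newWatcher()
	go w.set("READY")
	if err := w.await(context.Background(), "READY"); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("reached READY")
}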
|
||||
|
||||
// tearDown starts to tear down the addrConn.
|
||||
//
|
||||
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
|
||||
|
@ -1527,6 +1735,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
|
|||
// referenced by users.
|
||||
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
||||
|
||||
// getResolver finds the scheme in the cc's resolvers or the global registry.
|
||||
// scheme should always be lowercase (typically by virtue of url.Parse()
|
||||
// performing proper RFC3986 behavior).
|
||||
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
||||
for _, rb := range cc.dopts.resolvers {
|
||||
if scheme == rb.Scheme() {
|
||||
|
@ -1548,7 +1759,14 @@ func (cc *ClientConn) connectionError() error {
|
|||
return cc.lastConnectionError
|
||||
}
|
||||
|
||||
func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
||||
// parseTargetAndFindResolver parses the user's dial target and stores the
|
||||
// parsed target in `cc.parsedTarget`.
|
||||
//
|
||||
// The resolver to use is determined based on the scheme in the parsed target
|
||||
// and the same is stored in `cc.resolverBuilder`.
|
||||
//
|
||||
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
||||
func (cc *ClientConn) parseTargetAndFindResolver() error {
|
||||
channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
|
||||
|
||||
var rb resolver.Builder
|
||||
|
@ -1560,7 +1778,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
|||
rb = cc.getResolver(parsedTarget.URL.Scheme)
|
||||
if rb != nil {
|
||||
cc.parsedTarget = parsedTarget
|
||||
return rb, nil
|
||||
cc.resolverBuilder = rb
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1575,15 +1794,16 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
|||
parsedTarget, err = parseTarget(canonicalTarget)
|
||||
if err != nil {
|
||||
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
|
||||
rb = cc.getResolver(parsedTarget.URL.Scheme)
|
||||
if rb == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
|
||||
return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
|
||||
}
|
||||
cc.parsedTarget = parsedTarget
|
||||
return rb, nil
|
||||
cc.resolverBuilder = rb
|
||||
return nil
|
||||
}
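parseTargetAndFindResolver now records the parsed target and the chosen resolver builder on the ClientConn instead of returning them, but scheme-based resolver selection is unchanged from the caller's point of view. A hedged usage sketch, with a placeholder target and insecure credentials only for brevity:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "dns:///example.com:443" selects the registered "dns" resolver builder;
	// a target without a known scheme falls back to the default scheme,
	// typically "passthrough" for grpc.Dial, i.e. the address is used as given.
	conn, err := grpc.Dial("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}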
|
||||
|
||||
// parseTarget uses RFC 3986 semantics to parse the given target into a
|
||||
|
@ -1606,7 +1826,15 @@ func parseTarget(target string) (resolver.Target, error) {
|
|||
// - user specified authority override using `WithAuthority` dial option
|
||||
// - creds' notion of server name for the authentication handshake
|
||||
// - endpoint from dial target of the form "scheme://[authority]/endpoint"
|
||||
func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
|
||||
//
|
||||
// Stores the determined authority in `cc.authority`.
|
||||
//
|
||||
// Returns a non-nil error if the authority returned by the transport
|
||||
// credentials do not match the authority configured through the dial option.
|
||||
//
|
||||
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
|
||||
func (cc *ClientConn) determineAuthority() error {
|
||||
dopts := cc.dopts
|
||||
// Historically, we had two options for users to specify the serverName or
|
||||
// authority for a channel. One was through the transport credentials
|
||||
// (either in its constructor, or through the OverrideServerName() method).
|
||||
|
@ -1623,25 +1851,58 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err
|
|||
}
|
||||
authorityFromDialOption := dopts.authority
|
||||
if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
|
||||
return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
|
||||
return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
|
||||
}
|
||||
|
||||
endpoint := cc.parsedTarget.Endpoint()
|
||||
target := cc.target
|
||||
switch {
|
||||
case authorityFromDialOption != "":
|
||||
return authorityFromDialOption, nil
|
||||
cc.authority = authorityFromDialOption
|
||||
case authorityFromCreds != "":
|
||||
return authorityFromCreds, nil
|
||||
cc.authority = authorityFromCreds
|
||||
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
|
||||
// TODO: remove when the unix resolver implements optional interface to
|
||||
// return channel authority.
|
||||
return "localhost", nil
|
||||
cc.authority = "localhost"
|
||||
case strings.HasPrefix(endpoint, ":"):
|
||||
return "localhost" + endpoint, nil
|
||||
cc.authority = "localhost" + endpoint
|
||||
default:
|
||||
// TODO: Define an optional interface on the resolver builder to return
|
||||
// the channel authority given the user's dial target. For resolvers
|
||||
// which don't implement this interface, we will use the endpoint from
|
||||
// "scheme://authority/endpoint" as the default authority.
|
||||
return endpoint, nil
|
||||
cc.authority = endpoint
|
||||
}
|
||||
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
|
||||
return nil
|
||||
}
|
||||
|
||||
// initResolverWrapper creates a ccResolverWrapper, which builds the name
|
||||
// resolver. This method grabs the lock to assign the newly built resolver
|
||||
// wrapper to the cc.resolverWrapper field.
|
||||
func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
|
||||
rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
|
||||
target: cc.parsedTarget,
|
||||
builder: cc.resolverBuilder,
|
||||
bOpts: resolver.BuildOptions{
|
||||
DisableServiceConfig: cc.dopts.disableServiceConfig,
|
||||
DialCreds: creds,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
},
|
||||
channelzID: cc.channelzID,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to build resolver: %v", err)
|
||||
}
|
||||
// Resolver implementations may report state update or error inline when
|
||||
// built (or right after), and this is handled in cc.updateResolverState.
|
||||
// Also, an error from the resolver might lead to a re-resolution request
|
||||
// from the balancer, which is handled in resolveNow() where
|
||||
// `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
|
||||
cc.mu.Lock()
|
||||
cc.resolverWrapper = rw
|
||||
cc.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
|
51
vendor/google.golang.org/grpc/codes/code_string.go
generated
vendored
|
@ -18,7 +18,15 @@
|
|||
|
||||
package codes
|
||||
|
||||
import "strconv"
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.CanonicalString = canonicalString
|
||||
}
|
||||
|
||||
func (c Code) String() string {
|
||||
switch c {
|
||||
|
@ -60,3 +68,44 @@ func (c Code) String() string {
|
|||
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
}
|
||||
|
||||
func canonicalString(c Code) string {
|
||||
switch c {
|
||||
case OK:
|
||||
return "OK"
|
||||
case Canceled:
|
||||
return "CANCELLED"
|
||||
case Unknown:
|
||||
return "UNKNOWN"
|
||||
case InvalidArgument:
|
||||
return "INVALID_ARGUMENT"
|
||||
case DeadlineExceeded:
|
||||
return "DEADLINE_EXCEEDED"
|
||||
case NotFound:
|
||||
return "NOT_FOUND"
|
||||
case AlreadyExists:
|
||||
return "ALREADY_EXISTS"
|
||||
case PermissionDenied:
|
||||
return "PERMISSION_DENIED"
|
||||
case ResourceExhausted:
|
||||
return "RESOURCE_EXHAUSTED"
|
||||
case FailedPrecondition:
|
||||
return "FAILED_PRECONDITION"
|
||||
case Aborted:
|
||||
return "ABORTED"
|
||||
case OutOfRange:
|
||||
return "OUT_OF_RANGE"
|
||||
case Unimplemented:
|
||||
return "UNIMPLEMENTED"
|
||||
case Internal:
|
||||
return "INTERNAL"
|
||||
case Unavailable:
|
||||
return "UNAVAILABLE"
|
||||
case DataLoss:
|
||||
return "DATA_LOSS"
|
||||
case Unauthenticated:
|
||||
return "UNAUTHENTICATED"
|
||||
default:
|
||||
return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
}
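The init function added above publishes canonicalString through the internal package (see the CanonicalString field in the internal.go hunk at the end of this diff). Only packages under google.golang.org/grpc/ may import internal, so the sketch below is written as if it lived inside that tree; the type assertion matches the documented func(codes.Code) string shape and the package name is hypothetical:

package example // hypothetical package inside the google.golang.org/grpc tree

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal"
)

// printCanonical prints the UPPER_SNAKE_CASE form of a status code, e.g.
// "DEADLINE_EXCEEDED", using the hook installed by the codes package's init.
func printCanonical(c codes.Code) {
	f, ok := internal.CanonicalString.(func(codes.Code) string)
	if !ok {
		fmt.Println("canonical string hook not installed")
		return
	}
	fmt.Println(f(c))
}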
|
||||
|
|
52
vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
generated
vendored
|
@ -138,7 +138,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions {
|
|||
// and server options (server options struct does not exist now. When
|
||||
// caller can provide endpoints, it should be created.
|
||||
|
||||
// altsHandshaker is used to complete a ALTS handshaking between client and
|
||||
// altsHandshaker is used to complete an ALTS handshake between client and
|
||||
// server. This handshaker talks to the ALTS handshaker service in the metadata
|
||||
// server.
|
||||
type altsHandshaker struct {
|
||||
|
@ -146,6 +146,8 @@ type altsHandshaker struct {
|
|||
stream altsgrpc.HandshakerService_DoHandshakeClient
|
||||
// the connection to the peer.
|
||||
conn net.Conn
|
||||
// a virtual connection to the ALTS handshaker service.
|
||||
clientConn *grpc.ClientConn
|
||||
// client handshake options.
|
||||
clientOpts *ClientHandshakerOptions
|
||||
// server handshake options.
|
||||
|
@ -154,39 +156,33 @@ type altsHandshaker struct {
|
|||
side core.Side
|
||||
}
|
||||
|
||||
// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC
|
||||
// stub created using the passed conn and used to talk to the ALTS Handshaker
|
||||
// NewClientHandshaker creates a core.Handshaker that performs a client-side
|
||||
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
|
||||
// service in the metadata server.
|
||||
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &altsHandshaker{
|
||||
stream: stream,
|
||||
stream: nil,
|
||||
conn: c,
|
||||
clientConn: conn,
|
||||
clientOpts: opts,
|
||||
side: core.ClientSide,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC
|
||||
// stub created using the passed conn and used to talk to the ALTS Handshaker
|
||||
// NewServerHandshaker creates a core.Handshaker that performs a server-side
|
||||
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
|
||||
// service in the metadata server.
|
||||
func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &altsHandshaker{
|
||||
stream: stream,
|
||||
stream: nil,
|
||||
conn: c,
|
||||
clientConn: conn,
|
||||
serverOpts: opts,
|
||||
side: core.ServerSide,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once
|
||||
// ClientHandshake starts and completes a client ALTS handshake for GCP. Once
|
||||
// done, ClientHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
|
@ -198,6 +194,16 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
|
|||
return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker")
|
||||
}
|
||||
|
||||
// TODO(matthewstevenson88): Change unit tests to use public APIs so
|
||||
// that h.stream can unconditionally be set based on h.clientConn.
|
||||
if h.stream == nil {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err)
|
||||
}
|
||||
h.stream = stream
|
||||
}
|
||||
|
||||
// Create target identities from service account list.
|
||||
targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
|
||||
for _, account := range h.clientOpts.TargetServiceAccounts {
|
||||
|
@ -229,7 +235,7 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
|
|||
return conn, authInfo, nil
|
||||
}
|
||||
|
||||
// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once
|
||||
// ServerHandshake starts and completes a server ALTS handshake for GCP. Once
|
||||
// done, ServerHandshake returns a secure connection.
|
||||
func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
|
||||
if !acquire() {
|
||||
|
@ -241,6 +247,16 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent
|
|||
return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker")
|
||||
}
|
||||
|
||||
// TODO(matthewstevenson88): Change unit tests to use public APIs so
|
||||
// that h.stream can unconditionally be set based on h.clientConn.
|
||||
if h.stream == nil {
|
||||
stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err)
|
||||
}
|
||||
h.stream = stream
|
||||
}
|
||||
|
||||
p := make([]byte, frameLimit)
|
||||
n, err := h.conn.Read(p)
|
||||
if err != nil {
|
||||
|
@ -371,5 +387,7 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b
|
|||
// Close terminates the Handshaker. It should be called when the caller obtains
|
||||
// the secure connection.
|
||||
func (h *altsHandshaker) Close() {
|
||||
if h.stream != nil {
|
||||
h.stream.CloseSend()
|
||||
}
|
||||
}
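The handshaker changes above defer DoHandshake stream creation from the constructors to the first ClientHandshake/ServerHandshake call, guarded by a nil check (the real code also rate-limits concurrent handshakes via acquire(), omitted here). A generic sketch of that lazy-initialization shape, with illustrative names only:

package main

import (
	"errors"
	"fmt"
)

// stream stands in for the handshaker-service RPC stream whose creation the
// real code now defers until the first handshake.
type stream struct{ id int }

type handshaker struct {
	stream *stream // nil until the first handshake call
	dials  int
}

// dialStream models the expensive setup step (NewHandshakerServiceClient plus
// DoHandshake in the real code).
func (h *handshaker) dialStream() (*stream, error) {
	h.dials++
	if h.dials > 1 {
		return nil, errors.New("unexpected second dial")
	}
	return &stream{id: h.dials}, nil
}

// handshake lazily creates the stream on first use and reuses it afterwards.
func (h *handshaker) handshake() error {
	if h.stream == nil {
		s, err := h.dialStream()
		if err != nil {
			return fmt.Errorf("failed to establish stream: %v", err)
		}
		h.stream = s
	}
	fmt.Printf("handshaking over stream %d\n", h.stream.id)
	return nil
}

func main() {
	h := &handshaker{}
	fmt.Println(h.handshake()) // dials once
	fmt.Println(h.handshake()) // reuses the same stream, no second dial
}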
|
||||
|
|
18
vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
generated
vendored
|
@ -58,3 +58,21 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) {
|
|||
}
|
||||
return hsConn, nil
|
||||
}
|
||||
|
||||
// CloseForTesting closes all open connections to the handshaker service.
|
||||
//
|
||||
// For testing purposes only.
|
||||
func CloseForTesting() error {
|
||||
for _, hsConn := range hsConnMap {
|
||||
if hsConn == nil {
|
||||
continue
|
||||
}
|
||||
if err := hsConn.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Reset the connection map.
|
||||
hsConnMap = make(map[string]*grpc.ClientConn)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/gcp/altscontext.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/gcp/handshaker.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.14.0
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc v4.22.0
|
||||
// source: grpc/gcp/handshaker.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
@ -35,6 +35,10 @@ import (
|
|||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
const (
|
||||
HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake"
|
||||
)
|
||||
|
||||
// HandshakerServiceClient is the client API for HandshakerService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
|
@ -57,7 +61,7 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl
|
|||
}
|
||||
|
||||
func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -17,8 +17,8 @@
|
|||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/gcp/transport_security_common.proto
|
||||
|
||||
package grpc_gcp
|
||||
|
|
47
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
|
@ -38,13 +38,14 @@ import (
|
|||
|
||||
func init() {
|
||||
internal.AddGlobalDialOptions = func(opt ...DialOption) {
|
||||
extraDialOptions = append(extraDialOptions, opt...)
|
||||
globalDialOptions = append(globalDialOptions, opt...)
|
||||
}
|
||||
internal.ClearGlobalDialOptions = func() {
|
||||
extraDialOptions = nil
|
||||
globalDialOptions = nil
|
||||
}
|
||||
internal.WithBinaryLogger = withBinaryLogger
|
||||
internal.JoinDialOptions = newJoinDialOption
|
||||
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
|
||||
}
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
|
@ -76,6 +77,7 @@ type dialOptions struct {
|
|||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||
defaultServiceConfigRawJSON *string
|
||||
resolvers []resolver.Builder
|
||||
idleTimeout time.Duration
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
|
@ -83,7 +85,7 @@ type DialOption interface {
|
|||
apply(*dialOptions)
|
||||
}
|
||||
|
||||
var extraDialOptions []DialOption
|
||||
var globalDialOptions []DialOption
|
||||
|
||||
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
||||
// another structure to build custom dial options.
|
||||
|
@ -96,6 +98,16 @@ type EmptyDialOption struct{}
|
|||
|
||||
func (EmptyDialOption) apply(*dialOptions) {}
|
||||
|
||||
type disableGlobalDialOptions struct{}
|
||||
|
||||
func (disableGlobalDialOptions) apply(*dialOptions) {}
|
||||
|
||||
// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
|
||||
// from applying the global DialOptions (set via AddGlobalDialOptions).
|
||||
func newDisableGlobalDialOptions() DialOption {
|
||||
return &disableGlobalDialOptions{}
|
||||
}
|
||||
|
||||
// funcDialOption wraps a function that modifies dialOptions into an
|
||||
// implementation of the DialOption interface.
|
||||
type funcDialOption struct {
|
||||
|
@ -284,6 +296,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
|
|||
// WithBlock returns a DialOption which makes callers of Dial block until the
|
||||
// underlying connection is up. Without this, Dial returns immediately and
|
||||
// connecting the server happens in background.
|
||||
//
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
func WithBlock() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
|
@ -295,6 +310,9 @@ func WithBlock() DialOption {
|
|||
// the context.DeadlineExceeded error.
|
||||
// Implies WithBlock()
|
||||
//
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
|
@ -437,6 +455,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
|
|||
// FailOnNonTempDialError only affects the initial dial, and does not do
|
||||
// anything useful unless you are also using WithBlock().
|
||||
//
|
||||
// Use of this feature is not recommended. For more information, please see:
|
||||
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
|
@ -635,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
|
|||
o.resolvers = append(o.resolvers, rs...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithIdleTimeout returns a DialOption that configures an idle timeout for the
|
||||
// channel. If the channel is idle for the configured timeout, i.e there are no
|
||||
// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
|
||||
// and as a result the name resolver and load balancer will be shut down. The
|
||||
// channel will exit idle mode when the Connect() method is called or when an
|
||||
// RPC is initiated.
|
||||
//
|
||||
// By default this feature is disabled, which can also be explicitly configured
|
||||
// by passing zero to this function.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func WithIdleTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.idleTimeout = d
|
||||
})
|
||||
}
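WithIdleTimeout is the user-facing switch for the new idle.go machinery added below. A hedged usage sketch (target and timeout are placeholders; the option is experimental, as noted above):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With a 30-minute idle timeout, the channel drops its name resolver and
	// load balancer after 30 minutes without RPCs, and revives them on the
	// next RPC or on an explicit conn.Connect().
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}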
|
||||
|
|
287
vendor/google.golang.org/grpc/idle.go
generated
vendored
Normal file
|
@ -0,0 +1,287 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// For overriding in unit tests.
|
||||
var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
|
||||
return time.AfterFunc(d, f)
|
||||
}
|
||||
|
||||
// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
|
||||
// and exit from idle mode.
|
||||
type idlenessEnforcer interface {
|
||||
exitIdleMode() error
|
||||
enterIdleMode() error
|
||||
}
|
||||
|
||||
// idlenessManager defines the functionality required to track RPC activity on a
|
||||
// channel.
|
||||
type idlenessManager interface {
|
||||
onCallBegin() error
|
||||
onCallEnd()
|
||||
close()
|
||||
}
|
||||
|
||||
type noopIdlenessManager struct{}
|
||||
|
||||
func (noopIdlenessManager) onCallBegin() error { return nil }
|
||||
func (noopIdlenessManager) onCallEnd() {}
|
||||
func (noopIdlenessManager) close() {}
|
||||
|
||||
// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
|
||||
// operations to synchronize access to shared state and a mutex to guarantee
|
||||
// mutual exclusion in a critical section.
|
||||
type idlenessManagerImpl struct {
|
||||
// State accessed atomically.
|
||||
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
|
||||
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
|
||||
activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
|
||||
closed int32 // Boolean; True when the manager is closed.
|
||||
|
||||
// Can be accessed without atomics or mutex since these are set at creation
|
||||
// time and read-only after that.
|
||||
enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
|
||||
timeout int64 // Idle timeout duration nanos stored as an int64.
|
||||
|
||||
// idleMu is used to guarantee mutual exclusion in two scenarios:
|
||||
// - Opposing intentions:
|
||||
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put
|
||||
// the channel in idle mode because the channel has been inactive.
|
||||
// - b: At the same time an RPC is made on the channel, and onCallBegin()
|
||||
// is trying to prevent the channel from going idle.
|
||||
// - Competing intentions:
|
||||
// - The channel is in idle mode and there are multiple RPCs starting at
|
||||
// the same time, all trying to move the channel out of idle. Only one
|
||||
// of them should succeed in doing so, while the other RPCs should
|
||||
// piggyback on the first one and be successfully handled.
|
||||
idleMu sync.RWMutex
|
||||
actuallyIdle bool
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// newIdlenessManager creates a new idleness manager implementation for the
|
||||
// given idle timeout.
|
||||
func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
|
||||
if idleTimeout == 0 {
|
||||
return noopIdlenessManager{}
|
||||
}
|
||||
|
||||
i := &idlenessManagerImpl{
|
||||
enforcer: enforcer,
|
||||
timeout: int64(idleTimeout),
|
||||
}
|
||||
i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
|
||||
return i
|
||||
}
|
||||
|
||||
// resetIdleTimer resets the idle timer to the given duration. This method
|
||||
// should only be called from the timer callback.
|
||||
func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
|
||||
i.idleMu.Lock()
|
||||
defer i.idleMu.Unlock()
|
||||
|
||||
if i.timer == nil {
|
||||
// Only close sets timer to nil. We are done.
|
||||
return
|
||||
}
|
||||
|
||||
// It is safe to ignore the return value from Reset() because this method is
|
||||
// only ever called from the timer callback, which means the timer has
|
||||
// already fired.
|
||||
i.timer.Reset(d)
|
||||
}
|
||||
|
||||
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
|
||||
// configured idle timeout. The channel is considered inactive if there are no
|
||||
// ongoing calls and no RPC activity since the last time the timer fired.
|
||||
func (i *idlenessManagerImpl) handleIdleTimeout() {
|
||||
if i.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&i.activeCallsCount) > 0 {
|
||||
i.resetIdleTimer(time.Duration(i.timeout))
|
||||
return
|
||||
}
|
||||
|
||||
// There has been activity on the channel since we last got here. Reset the
|
||||
// timer and return.
|
||||
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
||||
// Set the timer to fire after a duration of idle timeout, calculated
|
||||
// from the time the most recent RPC completed.
|
||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
|
||||
i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
|
||||
return
|
||||
}
|
||||
|
||||
// This CAS operation is extremely likely to succeed given that there has
|
||||
// been no activity since the last time we were here. Setting the
|
||||
// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
|
||||
// channel is either in idle mode or is trying to get there.
|
||||
if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
|
||||
// This CAS operation can fail if an RPC started after we checked for
|
||||
// activity at the top of this method, or one was ongoing from before
|
||||
// the last time we were here. In both cases, reset the timer and return.
|
||||
i.resetIdleTimer(time.Duration(i.timeout))
|
||||
return
|
||||
}
|
||||
|
||||
// Now that we've set the active calls count to -math.MaxInt32, it's time to
|
||||
// actually move to idle mode.
|
||||
if i.tryEnterIdleMode() {
|
||||
// Successfully entered idle mode. No timer needed until we exit idle.
|
||||
return
|
||||
}
|
||||
|
||||
// Failed to enter idle mode due to a concurrent RPC that kept the channel
|
||||
// active, or because of an error from the channel. Undo the attempt to
|
||||
// enter idle, and reset the timer to try again later.
|
||||
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
||||
i.resetIdleTimer(time.Duration(i.timeout))
|
||||
}
|
||||
|
||||
// tryEnterIdleMode instructs the channel to enter idle mode. But before
|
||||
// that, it performs a last minute check to ensure that no new RPC has come in,
|
||||
// making the channel active.
|
||||
//
|
||||
// Return value indicates whether or not the channel moved to idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
|
||||
func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
|
||||
i.idleMu.Lock()
|
||||
defer i.idleMu.Unlock()
|
||||
|
||||
if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
|
||||
// We raced and lost to a new RPC. Very rare, but stop entering idle.
|
||||
return false
|
||||
}
|
||||
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
|
||||
// A very short RPC could have come in (and also finished) after we
|
||||
// checked for calls count and activity in handleIdleTimeout(), but
|
||||
// before the CAS operation. So, we need to check for activity again.
|
||||
return false
|
||||
}
|
||||
|
||||
// No new RPCs have come in since we last set the active calls count value
|
||||
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
|
||||
// safe to enter idle mode now.
|
||||
if err := i.enforcer.enterIdleMode(); err != nil {
|
||||
logger.Errorf("Failed to enter idle mode: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Successfully entered idle mode.
|
||||
i.actuallyIdle = true
|
||||
return true
|
||||
}
|
||||
|
||||
// onCallBegin is invoked at the start of every RPC.
|
||||
func (i *idlenessManagerImpl) onCallBegin() error {
|
||||
if i.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
|
||||
// Channel is not idle now. Set the activity bit and allow the call.
|
||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Channel is either in idle mode or is in the process of moving to idle
|
||||
// mode. Attempt to exit idle mode to allow this RPC.
|
||||
if err := i.exitIdleMode(); err != nil {
|
||||
// Undo the increment to calls count, and return an error causing the
|
||||
// RPC to fail.
|
||||
atomic.AddInt32(&i.activeCallsCount, -1)
|
||||
return err
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// exitIdleMode instructs the channel to exit idle mode.
|
||||
//
|
||||
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
|
||||
func (i *idlenessManagerImpl) exitIdleMode() error {
|
||||
i.idleMu.Lock()
|
||||
defer i.idleMu.Unlock()
|
||||
|
||||
if !i.actuallyIdle {
|
||||
// This can happen in two scenarios:
|
||||
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
|
||||
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
|
||||
// came in and onCallBegin() noticed that the calls count is negative.
|
||||
// - Channel is in idle mode, and multiple new RPCs come in at the same
|
||||
// time, all of them notice a negative calls count in onCallBegin and get
|
||||
// here. The first one to get the lock would get the channel to exit idle.
|
||||
//
|
||||
// Either way, nothing to do here.
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := i.enforcer.exitIdleMode(); err != nil {
|
||||
return fmt.Errorf("channel failed to exit idle mode: %v", err)
|
||||
}
|
||||
|
||||
// Undo the idle entry process. This also respects any new RPC attempts.
|
||||
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
|
||||
i.actuallyIdle = false
|
||||
|
||||
// Start a new timer to fire after the configured idle timeout.
|
||||
i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
|
||||
return nil
|
||||
}
|
||||
|
||||
// onCallEnd is invoked at the end of every RPC.
|
||||
func (i *idlenessManagerImpl) onCallEnd() {
|
||||
if i.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
// Record the time at which the most recent call finished.
|
||||
atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
|
||||
|
||||
// Decrement the active calls count. This count can temporarily go negative
|
||||
// when the timer callback is in the process of moving the channel to idle
|
||||
// mode, but one or more RPCs come in and complete before the timer callback
|
||||
// can get done with the process of moving to idle mode.
|
||||
atomic.AddInt32(&i.activeCallsCount, -1)
|
||||
}
|
||||
|
||||
func (i *idlenessManagerImpl) isClosed() bool {
|
||||
return atomic.LoadInt32(&i.closed) == 1
|
||||
}
|
||||
|
||||
func (i *idlenessManagerImpl) close() {
|
||||
atomic.StoreInt32(&i.closed, 1)
|
||||
|
||||
i.idleMu.Lock()
|
||||
i.timer.Stop()
|
||||
i.timer = nil
|
||||
i.idleMu.Unlock()
|
||||
}
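That is the end of the new idle.go. The manager only works if every RPC is bracketed by onCallBegin/onCallEnd, which the ClientConn does internally; the stub below sketches that contract with local stand-ins (idlenessManager is unexported, so none of these names are grpc's):

package main

import (
	"fmt"
	"time"
)

// manager mirrors the shape of the unexported idlenessManager interface.
type manager interface {
	onCallBegin() error
	onCallEnd()
	close()
}

// noopManager mirrors noopIdlenessManager: used when no idle timeout is set.
type noopManager struct{}

func (noopManager) onCallBegin() error { return nil }
func (noopManager) onCallEnd()         {}
func (noopManager) close()             {}

// doRPC brackets a unit of work the way ClientConn brackets each RPC: begin
// may fail (for example if exiting idle mode fails), and end always runs
// afterwards so the active-calls count stays balanced.
func doRPC(m manager, work func()) error {
	if err := m.onCallBegin(); err != nil {
		return err
	}
	defer m.onCallEnd()
	work()
	return nil
}

func main() {
	m := noopManager{}
	defer m.close()
	_ = doRPC(m, func() {
		fmt.Println("rpc body")
		time.Sleep(time.Millisecond)
	})
}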
|
11
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
generated
vendored
|
@ -28,8 +28,13 @@ import (
|
|||
"google.golang.org/grpc/internal/grpcutil"
|
||||
)
|
||||
|
||||
// Logger is the global binary logger. It can be used to get binary logger for
|
||||
// each method.
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// Logger specifies MethodLoggers for method names with a Log call that
|
||||
// takes a context.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type Logger interface {
|
||||
GetMethodLogger(methodName string) MethodLogger
|
||||
}
|
||||
|
@ -40,8 +45,6 @@ type Logger interface {
|
|||
// It is used to get a MethodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// SetLogger sets the binary logger.
|
||||
//
|
||||
// Only call this at init time.
|
||||
|
|
14
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
generated
vendored
|
@ -19,6 +19,7 @@
|
|||
package binarylog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
@ -48,8 +49,11 @@ func (g *callIDGenerator) reset() {
|
|||
var idGen callIDGenerator
|
||||
|
||||
// MethodLogger is the sub-logger for each method.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type MethodLogger interface {
|
||||
Log(LogEntryConfig)
|
||||
Log(context.Context, LogEntryConfig)
|
||||
}
|
||||
|
||||
// TruncatingMethodLogger is a method logger that truncates headers and messages
|
||||
|
@ -64,6 +68,9 @@ type TruncatingMethodLogger struct {
|
|||
}
|
||||
|
||||
// NewTruncatingMethodLogger returns a new truncating method logger.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||||
return &TruncatingMethodLogger{
|
||||
headerMaxLen: h,
|
||||
|
@ -98,7 +105,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
|
|||
}
|
||||
|
||||
// Log creates a proto binary log entry, and logs it to the sink.
|
||||
func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
|
||||
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
|
||||
ml.sink.Write(ml.Build(c))
|
||||
}
|
||||
|
||||
|
@ -144,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun
|
|||
}
|
||||
|
||||
// LogEntryConfig represents the configuration for binary log entry.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type LogEntryConfig interface {
|
||||
toProto() *binlogpb.GrpcLogEntry
|
||||
}
|
||||
|
|
26
vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
|
@ -35,6 +35,7 @@ import "sync"
|
|||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
closed bool
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
}
|
||||
|
@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
|
|||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
|
@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
|
|||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
|
@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
|
|||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
|
@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
|
|||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
//
|
||||
// If the unbounded buffer is closed, the read channel returned by this method
|
||||
// is closed.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
return b.c
|
||||
}
|
||||
|
||||
// Close closes the unbounded buffer.
|
||||
func (b *Unbounded) Close() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if b.closed {
|
||||
return
|
||||
}
|
||||
b.closed = true
|
||||
close(b.c)
|
||||
}
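With the closed flag and Close() in place, a consumer can treat closure of the Get() channel as shutdown. internal/buffer can only be imported from inside the google.golang.org/grpc module, so the sketch below is written as if it lived there; it uses only the Put/Get/Load/Close contract documented above, and the package name is hypothetical:

package example // hypothetical package inside the google.golang.org/grpc tree

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

// consume drains an Unbounded buffer until Close() is called on it. After
// each value is read from Get(), Load() must be called to move the next
// backlogged value (if any) onto the channel.
func consume(b *buffer.Unbounded) {
	for v := range b.Get() {
		b.Load()
		fmt.Println("got:", v)
	}
	// Get()'s channel was closed by Close(); nothing more will arrive.
}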
|
||||
|
|
4
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
|
@ -36,6 +36,10 @@ var (
|
|||
// "GRPC_RING_HASH_CAP". This does not override the default bounds
|
||||
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
||||
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
||||
// PickFirstLBConfig is set if we should support configuration of the
|
||||
// pick_first LB policy, which can be enabled by setting the environment
|
||||
// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
|
||||
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
|
||||
)
|
||||
|
||||
func boolFromEnv(envVar string, def bool) bool {
|
||||
|
|
6
vendor/google.golang.org/grpc/internal/envconfig/observability.go
generated
vendored
|
@ -28,9 +28,15 @@ const (
|
|||
var (
|
||||
// ObservabilityConfig is the json configuration for the gcp/observability
|
||||
// package specified directly in the envObservabilityConfig env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfig = os.Getenv(envObservabilityConfig)
|
||||
// ObservabilityConfigFile is the json configuration for the
|
||||
// gcp/observability specified in a file with the location specified in
|
||||
// envObservabilityConfigFile env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
|
||||
)
|
||||
|
|
21
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
|
@ -61,11 +61,10 @@ var (
|
|||
// have a brand new API on the server-side and users explicitly need to use
|
||||
// the new API to get security integration on the server.
|
||||
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
||||
// and DNS cluster is enabled, which can be enabled by setting the
|
||||
// environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
||||
// "true".
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
|
||||
// DNS cluster is enabled, which can be disabled by setting the environment
|
||||
// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||||
// to "false".
|
||||
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
||||
|
||||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||||
|
@ -79,14 +78,18 @@ var (
|
|||
// XDSFederation indicates whether federation support is enabled, which can
|
||||
// be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
||||
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
|
||||
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
|
||||
|
||||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||||
// support for the RLS CLuster Specifier is enabled, which can be enabled by
|
||||
// support for the RLS CLuster Specifier is enabled, which can be disabled by
|
||||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||||
// "true".
|
||||
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
|
||||
// "false".
|
||||
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
|
||||
|
||||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
||||
// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
|
||||
// can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
|
||||
XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
|
||||
)
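Note the flipped defaults above: XDSFederation and XDSRLS are now on by default and are disabled by setting their variables to "false". boolFromEnv's body is not part of this hunk; the helper below is a plausible, hypothetical sketch of a default-aware boolean env reader, not grpc's implementation:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolFromEnvSketch is a hypothetical stand-in for a default-aware boolean
// env reader: unset or unparsable values fall back to the default.
func boolFromEnvSketch(name string, def bool) bool {
	v, ok := os.LookupEnv(name)
	if !ok {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return def
	}
	return b
}

func main() {
	// Setting the variable here only affects this sketch; the real envconfig
	// package reads its variables at package-init time, so in practice they
	// must be set before the process starts.
	os.Setenv("GRPC_EXPERIMENTAL_XDS_RLS_LB", "false")
	fmt.Println(boolFromEnvSketch("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)) // false
}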
|
||||
|
|
12
vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
generated
vendored
|
@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
|||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
|
@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
|||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func (pl *PrefixLogger) V(l int) bool {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
return Logger.V(l)
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
|
|
14
vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
generated
vendored
|
@ -72,3 +72,17 @@ func Uint64() uint64 {
|
|||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
||||
|
||||
// Uint32 implements rand.Uint32 on the grpcrand global source.
|
||||
func Uint32() uint32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint32()
|
||||
}
|
||||
|
||||
// Shuffle implements rand.Shuffle on the grpcrand global source.
|
||||
var Shuffle = func(n int, f func(int, int)) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
r.Shuffle(n, f)
|
||||
}
|
||||
|
|
119
vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
generated
vendored
Normal file
|
@ -0,0 +1,119 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
)
|
||||
|
||||
// CallbackSerializer provides a mechanism to schedule callbacks in a
|
||||
// synchronized manner. It provides a FIFO guarantee on the order of execution
|
||||
// of scheduled callbacks. New callbacks can be scheduled by invoking the
|
||||
// Schedule() method.
|
||||
//
|
||||
// This type is safe for concurrent access.
|
||||
type CallbackSerializer struct {
|
||||
// Done is closed once the serializer is shut down completely, i.e all
|
||||
// scheduled callbacks are executed and the serializer has deallocated all
|
||||
// its resources.
|
||||
Done chan struct{}
|
||||
|
||||
callbacks *buffer.Unbounded
|
||||
closedMu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
|
||||
// context will be passed to the scheduled callbacks. Users should cancel the
|
||||
// provided context to shutdown the CallbackSerializer. It is guaranteed that no
|
||||
// callbacks will be added once this context is canceled, and any pending un-run
|
||||
// callbacks will be executed before the serializer is shut down.
|
||||
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
|
||||
t := &CallbackSerializer{
|
||||
Done: make(chan struct{}),
|
||||
callbacks: buffer.NewUnbounded(),
|
||||
}
|
||||
go t.run(ctx)
|
||||
return t
|
||||
}
|
||||
|
||||
// Schedule adds a callback to be scheduled after existing callbacks are run.
|
||||
//
|
||||
// Callbacks are expected to honor the context when performing any blocking
|
||||
// operations, and should return early when the context is canceled.
|
||||
//
|
||||
// Return value indicates if the callback was successfully added to the list of
|
||||
// callbacks to be executed by the serializer. It is not possible to add
|
||||
// callbacks once the context passed to NewCallbackSerializer is cancelled.
|
||||
func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
|
||||
t.closedMu.Lock()
|
||||
defer t.closedMu.Unlock()
|
||||
|
||||
if t.closed {
|
||||
return false
|
||||
}
|
||||
t.callbacks.Put(f)
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *CallbackSerializer) run(ctx context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
|
||||
defer close(t.Done)
|
||||
for ctx.Err() == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do nothing here. Next iteration of the for loop will not happen,
|
||||
// since ctx.Err() would be non-nil.
|
||||
case callback, ok := <-t.callbacks.Get():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
t.callbacks.Load()
|
||||
callback.(func(ctx context.Context))(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch pending callbacks if any, and execute them before returning from
|
||||
// this method and closing t.Done.
|
||||
t.closedMu.Lock()
|
||||
t.closed = true
|
||||
backlog = t.fetchPendingCallbacks()
|
||||
t.callbacks.Close()
|
||||
t.closedMu.Unlock()
|
||||
for _, b := range backlog {
|
||||
b(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
for {
|
||||
select {
|
||||
case b := <-t.callbacks.Get():
|
||||
backlog = append(backlog, b.(func(context.Context)))
|
||||
t.callbacks.Load()
|
||||
default:
|
||||
return backlog
|
||||
}
|
||||
}
|
||||
}
|
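Reviewer note: CallbackSerializer gives FIFO, one-at-a-time execution of scheduled callbacks and drains its backlog before closing Done. A hedged usage sketch based only on the API above (grpcsync is internal to grpc-go, so this is illustrative rather than something applications can import):

	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	cs.Schedule(func(ctx context.Context) { /* runs first */ })
	cs.Schedule(func(ctx context.Context) { /* runs only after the first returns */ })

	cancel()  // no new callbacks are accepted past this point
	<-cs.Done // closed once the pending callbacks have finished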
34  vendor/google.golang.org/grpc/internal/internal.go generated vendored
@ -58,6 +58,12 @@ var (
|
|||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||
// is configured on the underlying gRPC server. This is set by server.go.
|
||||
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
||||
// CanonicalString returns the canonical string of the code defined here:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
CanonicalString interface{} // func (codes.Code) string
|
||||
// DrainServerTransports initiates a graceful close of existing connections
|
||||
// on a gRPC server accepted on the provided listener address. An
|
||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
|
@ -66,16 +72,35 @@ var (
|
|||
// AddGlobalServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
|
||||
// ClearGlobalServerOptions clears the array of extra ServerOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalServerOptions func()
|
||||
// AddGlobalDialOptions adds an array of DialOption that will be effective
|
||||
// globally for newly created client channels. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalDialOptions interface{} // func(opt ...DialOption)
|
||||
// DisableGlobalDialOptions returns a DialOption that prevents the
|
||||
// ClientConn from applying the global DialOptions (set via
|
||||
// AddGlobalDialOptions).
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
DisableGlobalDialOptions interface{} // func() grpc.DialOption
|
||||
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalDialOptions func()
|
||||
// JoinDialOptions combines the dial options passed as arguments into a
|
||||
// single dial option.
|
||||
|
@ -86,9 +111,15 @@ var (
|
|||
|
||||
// WithBinaryLogger returns a DialOption that specifies the binary logger
|
||||
// for a ClientConn.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
|
||||
// BinaryLogger returns a ServerOption that can set the binary logger for a
|
||||
// server.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
|
@ -130,6 +161,9 @@ var (
|
|||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
UnregisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
||||
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
|
|
62  vendor/google.golang.org/grpc/internal/metadata/metadata.go generated vendored
@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
|
|||
return addr
|
||||
}
|
||||
|
||||
// Validate returns an error if the input md contains invalid keys or values.
|
||||
//
|
||||
// If the header is not a pseudo-header, the following items are checked:
|
||||
// - header names must contain one or more characters from this set [0-9 a-z _ - .].
|
||||
// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
|
||||
// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
|
||||
// Validate validates every pair in md with ValidatePair.
|
||||
func Validate(md metadata.MD) error {
|
||||
for k, vals := range md {
|
||||
// pseudo-header will be ignored
|
||||
if k[0] == ':' {
|
||||
continue
|
||||
}
|
||||
// check key, for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(k); i++ {
|
||||
r := k[i]
|
||||
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
||||
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(k, "-bin") {
|
||||
continue
|
||||
}
|
||||
// check value
|
||||
for _, val := range vals {
|
||||
if hasNotPrintable(val) {
|
||||
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
|
||||
}
|
||||
if err := ValidatePair(k, vals...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
|
|||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
|
||||
//
|
||||
// - key must contain one or more characters.
|
||||
// - the characters in the key must be contained in [0-9 a-z _ - .].
|
||||
// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
|
||||
// - the characters in the every value must be printable (in [%x20-%x7E]).
|
||||
func ValidatePair(key string, vals ...string) error {
|
||||
// key should not be empty
|
||||
if key == "" {
|
||||
return fmt.Errorf("there is an empty key in the header")
|
||||
}
|
||||
// pseudo-header will be ignored
|
||||
if key[0] == ':' {
|
||||
return nil
|
||||
}
|
||||
// check key, for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(key); i++ {
|
||||
r := key[i]
|
||||
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
||||
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(key, "-bin") {
|
||||
return nil
|
||||
}
|
||||
// check value
|
||||
for _, val := range vals {
|
||||
if hasNotPrintable(val) {
|
||||
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
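Reviewer note: Validate is now a thin loop over the new ValidatePair, which keeps the same rules as before. A small standalone checker restating the key rule (illustrative names, not the vendored functions):

	// validHeaderKey mirrors the key rule enforced by ValidatePair: keys must be
	// non-empty and drawn from [0-9 a-z _ - .]; pseudo-headers (leading ':') are skipped.
	func validHeaderKey(k string) bool {
		if k == "" {
			return false
		}
		if k[0] == ':' {
			return true
		}
		for i := 0; i < len(k); i++ {
			c := k[i]
			if !(c >= 'a' && c <= 'z') && !(c >= '0' && c <= '9') && c != '.' && c != '-' && c != '_' {
				return false
			}
		}
		return true
	}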
130  vendor/google.golang.org/grpc/internal/serviceconfig/duration.go generated vendored Normal file
@ -0,0 +1,130 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2023 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package serviceconfig
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Duration defines JSON marshal and unmarshal methods to conform to the
|
||||
// protobuf JSON spec defined [here].
|
||||
//
|
||||
// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
|
||||
type Duration time.Duration
|
||||
|
||||
func (d Duration) String() string {
|
||||
return fmt.Sprint(time.Duration(d))
|
||||
}
|
||||
|
||||
// MarshalJSON converts from d to a JSON string output.
|
||||
func (d Duration) MarshalJSON() ([]byte, error) {
|
||||
ns := time.Duration(d).Nanoseconds()
|
||||
sec := ns / int64(time.Second)
|
||||
ns = ns % int64(time.Second)
|
||||
|
||||
var sign string
|
||||
if sec < 0 || ns < 0 {
|
||||
sign, sec, ns = "-", -1*sec, -1*ns
|
||||
}
|
||||
|
||||
// Generated output always contains 0, 3, 6, or 9 fractional digits,
|
||||
// depending on required precision.
|
||||
str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
|
||||
str = strings.TrimSuffix(str, "000")
|
||||
str = strings.TrimSuffix(str, "000")
|
||||
str = strings.TrimSuffix(str, ".000")
|
||||
return []byte(fmt.Sprintf("\"%ss\"", str)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals b as a duration JSON string into d.
|
||||
func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.HasSuffix(s, "s") {
|
||||
return fmt.Errorf("malformed duration %q: missing seconds unit", s)
|
||||
}
|
||||
neg := false
|
||||
if s[0] == '-' {
|
||||
neg = true
|
||||
s = s[1:]
|
||||
}
|
||||
ss := strings.SplitN(s[:len(s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return fmt.Errorf("malformed duration %q: too many decimals", s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var sec, ns int64
|
||||
if len(ss[0]) > 0 {
|
||||
var err error
|
||||
if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
|
||||
return fmt.Errorf("malformed duration %q: %v", s, err)
|
||||
}
|
||||
// Maximum seconds value per the durationpb spec.
|
||||
const maxProtoSeconds = 315_576_000_000
|
||||
if sec > maxProtoSeconds {
|
||||
return fmt.Errorf("out of range: %q", s)
|
||||
}
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
|
||||
}
|
||||
var err error
|
||||
if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
|
||||
return fmt.Errorf("malformed duration %q: %v", s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
ns *= 10
|
||||
}
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return fmt.Errorf("malformed duration %q: contains no numbers", s)
|
||||
}
|
||||
|
||||
if neg {
|
||||
sec *= -1
|
||||
ns *= -1
|
||||
}
|
||||
|
||||
// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
|
||||
const maxSeconds = math.MaxInt64 / int64(time.Second)
|
||||
const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
|
||||
const minSeconds = math.MinInt64 / int64(time.Second)
|
||||
const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
|
||||
|
||||
if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
|
||||
*d = Duration(math.MaxInt64)
|
||||
} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
|
||||
*d = Duration(math.MinInt64)
|
||||
} else {
|
||||
*d = Duration(sec*int64(time.Second) + ns)
|
||||
}
|
||||
return nil
|
||||
}
|
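Reviewer note: the new Duration type speaks the protobuf JSON duration format ("1.500s", "0.200s", ...). A hedged round-trip sketch using the methods above (the package is internal to grpc-go; shown only to document the wire format):

	d := serviceconfig.Duration(1500 * time.Millisecond)
	b, _ := json.Marshal(d) // `"1.500s"`: 0, 3, 6, or 9 fractional digits, "s" suffix

	var parsed serviceconfig.Duration
	_ = json.Unmarshal([]byte(`"0.200s"`), &parsed)
	fmt.Println(time.Duration(parsed)) // 200ms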
93  vendor/google.golang.org/grpc/internal/transport/controlbuf.go generated vendored
@ -22,6 +22,7 @@ import (
|
|||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
@ -29,6 +30,7 @@ import (
|
|||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
@ -486,12 +488,14 @@ type loopyWriter struct {
|
|||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
conn net.Conn
|
||||
logger *grpclog.PrefixLogger
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
|
@ -504,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
|
|||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
conn: conn,
|
||||
logger: logger,
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
@ -521,15 +527,27 @@ const minBatchSize = 1000
|
|||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
||||
// if the batch size is too low to give stream goroutines a chance to fill it up.
|
||||
// frame, loopy calls processData, which processes one node from the
|
||||
// activeStreams linked-list. This results in writing of HTTP2 frames into an
|
||||
// underlying write buffer. When there's no more control frames to read from
|
||||
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
|
||||
// the batch size for each flush, loopy yields the processor, once if the batch
|
||||
// size is too low to give stream goroutines a chance to fill it up.
|
||||
//
|
||||
// Upon exiting, if the error causing the exit is not an I/O error, run()
|
||||
// flushes and closes the underlying connection. Otherwise, the connection is
|
||||
// left open to allow the I/O error to be encountered by the reader instead.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
// Always flush the writer before exiting in case there are pending frames
|
||||
// to be sent.
|
||||
defer l.framer.writer.Flush()
|
||||
defer func() {
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("loopyWriter exiting with error: %v", err)
|
||||
}
|
||||
if !isIOError(err) {
|
||||
l.framer.writer.Flush()
|
||||
l.conn.Close()
|
||||
}
|
||||
l.cbuf.finish()
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
if err != nil {
|
||||
|
@ -581,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
|
|||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
|
||||
// Otherwise update the quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
|
@ -593,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
|
|||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
|
@ -604,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
|||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
l.applySettings(s.ss)
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
|
@ -618,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
|||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -681,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
|||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Warningf("Encountered error while encoding headers: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -720,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
|||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
|
@ -732,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
|||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
|
@ -743,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
|
|||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
|
@ -763,6 +775,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
|||
}
|
||||
}
|
||||
if l.draining && len(l.estdStreams) == 0 {
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("finished processing active streams while in draining mode")
|
||||
}
|
||||
return nil
|
||||
|
@ -798,6 +811,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
|||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("received GOAWAY with no active streams")
|
||||
}
|
||||
}
|
||||
|
@ -816,17 +830,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) closeConnectionHandler() error {
|
||||
// Exit loopyWriter entirely by returning an error here. This will lead to
|
||||
// the transport closing the connection, and, ultimately, transport
|
||||
// closure.
|
||||
return ErrConnClosing
|
||||
}
|
||||
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
|
@ -836,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error {
|
|||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *earlyAbortStream:
|
||||
|
@ -844,21 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error {
|
|||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
l.outFlowControlSizeRequestHandler(i)
|
||||
case closeConnection:
|
||||
return l.closeConnectionHandler()
|
||||
// Just return a non-I/O error and run() will flush and close the
|
||||
// connection.
|
||||
return ErrConnClosing
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
|
@ -877,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
|||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
|
@ -911,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
|
|||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, nil
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
|
|
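Reviewer note: handlers that cannot fail no longer return an error, and loopyWriter.run now flushes and closes the connection itself unless the exit was caused by an I/O error (so the reader goroutine gets to observe that error instead). A minimal sketch of that exit policy, with illustrative names (isIOError is the helper added in http_util.go):

	// runWriter mirrors the defer added to loopyWriter.run: flush and close only
	// when the failure was not an I/O error.
	func runWriter(conn net.Conn, flush func() error, step func() error) (err error) {
		defer func() {
			if !isIOError(err) {
				flush()
				conn.Close()
			}
		}()
		for {
			if err = step(); err != nil {
				return err
			}
		}
	}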
9  vendor/google.golang.org/grpc/internal/transport/handler_server.go generated vendored
@ -39,6 +39,7 @@ import (
|
|||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
|
@ -83,6 +84,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
|||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
st.logger = prefixLoggerForServerHandlerTransport(st)
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
|
@ -151,12 +153,13 @@ type serverHandlerTransport struct {
|
|||
contentSubtype string
|
||||
|
||||
stats []stats.Handler
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close(err error) {
|
||||
ht.closeOnce.Do(func() {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Closing serverHandlerTransport: %v", err)
|
||||
if ht.logger.V(logLevel) {
|
||||
ht.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
close(ht.closedCh)
|
||||
})
|
||||
|
@ -450,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
|
|||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
func (ht *serverHandlerTransport) Drain(debugData string) {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
|
|
46  vendor/google.golang.org/grpc/internal/transport/http2_client.go generated vendored
@ -38,6 +38,7 @@ import (
|
|||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
|
@ -145,6 +146,7 @@ type http2Client struct {
|
|||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
|
||||
|
@ -244,7 +246,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|||
if err := connectCtx.Err(); err != nil {
|
||||
// connectCtx expired before exiting the function. Hard close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
|
||||
logger.Infof("Aborting due to connect deadline expiring: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
|
@ -346,6 +348,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|||
bufferPool: newBufferPool(),
|
||||
onClose: onClose,
|
||||
}
|
||||
t.logger = prefixLoggerForClientTransport(t)
|
||||
// Add peer information to the http2client context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
|
@ -444,15 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
|||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
err := t.loopy.run()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
|
||||
}
|
||||
// Do not close the transport. Let reader goroutine handle it since
|
||||
// there might be data in the buffers.
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
return t, nil
|
||||
|
@ -789,7 +785,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|||
s.id = h.streamID
|
||||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||||
t.mu.Lock()
|
||||
if t.activeStreams == nil { // Can be niled from Close().
|
||||
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
|
||||
t.mu.Unlock()
|
||||
return false // Don't create a stream if the transport is already closed.
|
||||
}
|
||||
|
@ -866,8 +862,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
|||
}
|
||||
}
|
||||
if transportDrainRequired {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: t.nextID > MaxStreamID. Draining")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
|
||||
}
|
||||
t.GracefulClose()
|
||||
}
|
||||
|
@ -959,8 +955,8 @@ func (t *http2Client) Close(err error) {
|
|||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing: %v", err)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
// Call t.onClose ASAP to prevent the client from attempting to create new
|
||||
// streams.
|
||||
|
@ -1016,8 +1012,8 @@ func (t *http2Client) GracefulClose() {
|
|||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: GracefulClose called")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("GracefulClose called")
|
||||
}
|
||||
t.onClose(GoAwayInvalid)
|
||||
t.state = draining
|
||||
|
@ -1181,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
|||
}
|
||||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
|
||||
}
|
||||
statusCode = codes.Unknown
|
||||
}
|
||||
|
@ -1264,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
|||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
|
||||
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
|
||||
// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
|
||||
// enabled by default and double the configure KEEPALIVE_TIME used for new connections
|
||||
// on that channel.
|
||||
logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
|
||||
}
|
||||
id := f.LastStreamID
|
||||
if id > 0 && id%2 == 0 {
|
||||
|
@ -1339,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
|||
|
||||
// setGoAwayReason sets the value of t.goAwayReason based
|
||||
// on the GoAway frame received.
|
||||
// It expects a lock on transport's mutext to be held by
|
||||
// It expects a lock on transport's mutex to be held by
|
||||
// the caller.
|
||||
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
|
||||
t.goAwayReason = GoAwayNoReason
|
||||
|
|
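Reviewer note: the client now logs at error level only when the ENHANCE_YOUR_CALM GOAWAY actually carries the ASCII debug data "too_many_pings". The check reduces to a small predicate over the frame; a sketch using the x/net/http2 types already imported by this file:

	// tooManyPings reports whether a GOAWAY is the keepalive-policy signal that
	// should be logged loudly (and that doubles the client's keepalive interval).
	func tooManyPings(f *http2.GoAwayFrame) bool {
		return f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings"
	}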
99  vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@ -35,7 +35,9 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -129,6 +131,8 @@ type http2Server struct {
|
|||
// This lock may not be taken if mu is already held.
|
||||
maxStreamMu sync.Mutex
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
// NewServerTransport creates a http2 transport with conn and configuration
|
||||
|
@ -167,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
ID: http2.SettingMaxFrameSize,
|
||||
Val: http2MaxFrameLen,
|
||||
}}
|
||||
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
|
||||
// permitted in the HTTP2 spec.
|
||||
maxStreams := config.MaxStreams
|
||||
if maxStreams == 0 {
|
||||
maxStreams = math.MaxUint32
|
||||
} else {
|
||||
if config.MaxStreams != math.MaxUint32 {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingMaxConcurrentStreams,
|
||||
Val: maxStreams,
|
||||
Val: config.MaxStreams,
|
||||
})
|
||||
}
|
||||
dynamicWindow := true
|
||||
|
@ -254,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
framer: framer,
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
maxStreams: maxStreams,
|
||||
maxStreams: config.MaxStreams,
|
||||
inTapHandle: config.InTapHandle,
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
state: reachable,
|
||||
|
@ -267,6 +266,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
t.logger = prefixLoggerForServerTransport(t)
|
||||
// Add peer information to the http2server context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
|
@ -331,14 +331,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
|||
t.handleSettings(sf)
|
||||
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
err := t.loopy.run()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
|
||||
}
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
go t.keepalive()
|
||||
|
@ -383,7 +378,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
// if false, content-type was missing or invalid
|
||||
isGRPC = false
|
||||
contentType = ""
|
||||
mdata = make(map[string][]string)
|
||||
mdata = make(metadata.MD, len(frame.Fields))
|
||||
httpMethod string
|
||||
// these are set if an error is encountered while parsing the headers
|
||||
protocolError bool
|
||||
|
@ -404,6 +399,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
s.contentSubtype = contentSubtype
|
||||
isGRPC = true
|
||||
|
||||
case "grpc-accept-encoding":
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
if hf.Value == "" {
|
||||
continue
|
||||
}
|
||||
compressors := hf.Value
|
||||
if s.clientAdvertisedCompressors != "" {
|
||||
compressors = s.clientAdvertisedCompressors + "," + compressors
|
||||
}
|
||||
s.clientAdvertisedCompressors = compressors
|
||||
case "grpc-encoding":
|
||||
s.recvCompress = hf.Value
|
||||
case ":method":
|
||||
|
@ -419,8 +425,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
// "Transports must consider requests containing the Connection header
|
||||
// as malformed." - A41
|
||||
case "connection":
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
|
||||
}
|
||||
protocolError = true
|
||||
default:
|
||||
|
@ -430,7 +436,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||||
if err != nil {
|
||||
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
|
||||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], v)
|
||||
|
@ -444,8 +450,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
// error, this takes precedence over a client not speaking gRPC.
|
||||
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
|
||||
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: %v", errMsg)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusBadRequest,
|
||||
|
@ -539,9 +545,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
}
|
||||
if httpMethod != http.MethodPost {
|
||||
t.mu.Unlock()
|
||||
errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: %v", errMsg)
|
||||
errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 405,
|
||||
|
@ -557,8 +563,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
var err error
|
||||
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
|
||||
t.mu.Unlock()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
|
||||
}
|
||||
stat, ok := status.FromError(err)
|
||||
if !ok {
|
||||
|
@ -595,7 +601,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
|||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Header: mdata.Copy(),
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
|
@ -632,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Warningf("Encountered http2.StreamError: %v", se)
|
||||
}
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
|
@ -676,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
|||
case *http2.GoAwayFrame:
|
||||
// TODO: Handle GoAway from the client appropriately.
|
||||
default:
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received unsupported frame type %T", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -936,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
|||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -1050,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
|||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
||||
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
|
||||
} else {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
|
||||
}
|
||||
|
@ -1155,18 +1161,18 @@ func (t *http2Server) keepalive() {
|
|||
if val <= 0 {
|
||||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.Drain()
|
||||
t.Drain("max_idle")
|
||||
return
|
||||
}
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.Drain()
|
||||
t.Drain("max_age")
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing server transport due to maximum connection age")
|
||||
}
|
||||
t.controlBuf.put(closeConnection{})
|
||||
case <-t.done:
|
||||
|
@ -1217,8 +1223,8 @@ func (t *http2Server) Close(err error) {
|
|||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing: %v", err)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
|
@ -1226,8 +1232,8 @@ func (t *http2Server) Close(err error) {
|
|||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
close(t.done)
|
||||
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
|
||||
logger.Infof("transport: error closing conn during Close: %v", err)
|
||||
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
|
||||
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
|
||||
}
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
// Cancel all active streams.
|
||||
|
@ -1307,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
|
|||
return t.remoteAddr
|
||||
}
|
||||
|
||||
func (t *http2Server) Drain() {
|
||||
func (t *http2Server) Drain(debugData string) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.drainEvent != nil {
|
||||
return
|
||||
}
|
||||
t.drainEvent = grpcsync.NewEvent()
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
|
||||
}
|
||||
|
||||
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
|
||||
|
@ -1344,9 +1350,6 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|||
return false, err
|
||||
}
|
||||
if retErr != nil {
|
||||
// Abruptly close the connection following the GoAway (via
|
||||
// loopywriter). But flush out what's inside the buffer first.
|
||||
t.framer.writer.Flush()
|
||||
return false, retErr
|
||||
}
|
||||
return true, nil
|
||||
|
@ -1359,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|||
// originated before the GoAway reaches the client.
|
||||
// After getting the ack or timer expiration send out another GoAway this
|
||||
// time with an ID of the max stream server intends to process.
|
||||
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
|
||||
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
|
||||
|
|
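Reviewer note: the "max_idle" and "max_age" debug strings passed to Drain correspond to the server keepalive enforcement above. For reference, these limits are configured through the public API; a short sketch with illustrative values:

	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle:     5 * time.Minute,  // idle timeout  -> Drain("max_idle")
		MaxConnectionAge:      30 * time.Minute, // age timeout   -> Drain("max_age")
		MaxConnectionAgeGrace: 30 * time.Second, // hard close after the graceful GOAWAY
	}))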
26  vendor/google.golang.org/grpc/internal/transport/http_util.go generated vendored
@ -21,6 +21,7 @@ package transport
|
|||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
|
@ -37,7 +38,6 @@ import (
|
|||
"golang.org/x/net/http2/hpack"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
|
@ -85,7 +85,6 @@ var (
|
|||
// 504 Gateway timeout - UNAVAILABLE.
|
||||
http.StatusGatewayTimeout: codes.Unavailable,
|
||||
}
|
||||
logger = grpclog.Component("transport")
|
||||
)
|
||||
|
||||
// isReservedHeader checks whether hdr belongs to HTTP2 headers
|
||||
|
@ -330,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
|
|||
return 0, w.err
|
||||
}
|
||||
if w.batchSize == 0 { // Buffer has been disabled.
|
||||
return w.conn.Write(b)
|
||||
n, err = w.conn.Write(b)
|
||||
return n, toIOError(err)
|
||||
}
|
||||
for len(b) > 0 {
|
||||
nn := copy(w.buf[w.offset:], b)
|
||||
|
@ -352,10 +352,30 @@ func (w *bufWriter) Flush() error {
|
|||
return nil
|
||||
}
|
||||
_, w.err = w.conn.Write(w.buf[:w.offset])
|
||||
w.err = toIOError(w.err)
|
||||
w.offset = 0
|
||||
return w.err
|
||||
}
|
||||
|
||||
type ioError struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (i ioError) Unwrap() error {
|
||||
return i.error
|
||||
}
|
||||
|
||||
func isIOError(err error) bool {
|
||||
return errors.As(err, &ioError{})
|
||||
}
|
||||
|
||||
func toIOError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return ioError{error: err}
|
||||
}
|
||||
|
||||
type framer struct {
|
||||
writer *bufWriter
|
||||
fr *http2.Framer
|
||||
|
|
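Reviewer note: toIOError/isIOError let the writer loop distinguish connection-write failures from protocol errors while keeping the original cause visible through Unwrap. A tiny demonstration of that property (fragment; the helpers live in package transport):

	base := errors.New("connection reset")
	wrapped := toIOError(base)
	fmt.Println(isIOError(wrapped))       // true
	fmt.Println(errors.Is(wrapped, base)) // true: Unwrap preserves the cause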
40  vendor/google.golang.org/grpc/internal/transport/logging.go generated vendored Normal file

@ -0,0 +1,40 @@
/*
 *
 * Copyright 2023 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

var logger = grpclog.Component("transport")

func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
}

func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
}

func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
}
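Reviewer note: every transport instance now carries a PrefixLogger whose prefix embeds the instance pointer, which is why the individual log messages above dropped their hand-written "transport:" prefixes. The same idea expressed with only the standard library, as a hedged sketch:

	type serverTransport struct{ /* ... */ }

	// newInstanceLogger mimics prefixLoggerForServerTransport: the instance pointer
	// becomes part of every line written through the returned logger.
	func newInstanceLogger(t *serverTransport) *log.Logger {
		return log.New(os.Stderr, fmt.Sprintf("[server-transport %p] ", t), log.LstdFlags)
	}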
25  vendor/google.golang.org/grpc/internal/transport/transport.go generated vendored
@ -257,6 +257,9 @@ type Stream struct {
|
|||
fc *inFlow
|
||||
wq *writeQuota
|
||||
|
||||
// Holds compressor names passed in grpc-accept-encoding metadata from the
|
||||
// client. This is empty for the client side stream.
|
||||
clientAdvertisedCompressors string
|
||||
// Callback to state application's intentions to read data. This
|
||||
// is used to adjust flow control, if needed.
|
||||
requestRead func(int)
|
||||
|
@ -345,8 +348,24 @@ func (s *Stream) RecvCompress() string {
|
|||
}
|
||||
|
||||
// SetSendCompress sets the compression algorithm to the stream.
|
||||
func (s *Stream) SetSendCompress(str string) {
|
||||
s.sendCompress = str
|
||||
func (s *Stream) SetSendCompress(name string) error {
|
||||
if s.isHeaderSent() || s.getState() == streamDone {
|
||||
return errors.New("transport: set send compressor called after headers sent or stream done")
|
||||
}
|
||||
|
||||
s.sendCompress = name
|
||||
return nil
|
||||
}
|
||||
|
||||
// SendCompress returns the send compressor name.
|
||||
func (s *Stream) SendCompress() string {
|
||||
return s.sendCompress
|
||||
}
|
||||
|
||||
// ClientAdvertisedCompressors returns the compressor names advertised by the
|
||||
// client via grpc-accept-encoding header.
|
||||
func (s *Stream) ClientAdvertisedCompressors() string {
|
||||
return s.clientAdvertisedCompressors
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed when it receives the final status
|
||||
|
@ -707,7 +726,7 @@ type ServerTransport interface {
|
|||
RemoteAddr() net.Addr
|
||||
|
||||
// Drain notifies the client this ServerTransport stops accepting new RPCs.
|
||||
Drain()
|
||||
Drain(debugData string)
|
||||
|
||||
// IncrMsgSent increments the number of message sent through this transport.
|
||||
IncrMsgSent()
|
||||
|
|
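Reviewer note: SetSendCompress can now fail once headers are out, and the stream records the client's grpc-accept-encoding list. A server-side caller would typically verify a candidate name against both before committing to it; a hypothetical helper (not gRPC API) built on the public encoding registry:

	// pickSendCompressor returns name only if the client advertised it and a
	// matching compressor is registered locally.
	func pickSendCompressor(name, clientAdvertised string) (string, bool) {
		if encoding.GetCompressor(name) == nil {
			return "", false
		}
		for _, c := range strings.Split(clientAdvertised, ",") {
			if strings.TrimSpace(c) == name {
				return name, true
			}
		}
		return "", false
	}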
13  vendor/google.golang.org/grpc/metadata/metadata.go generated vendored

@ -91,7 +91,11 @@ func (md MD) Len() int {

// Copy returns a copy of md.
func (md MD) Copy() MD {
	return Join(md)
	out := make(MD, len(md))
	for k, v := range md {
		out[k] = copyOf(v)
	}
	return out
}

// Get obtains the values for a given key.

@ -171,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context
	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
	added := make([][]string, len(md.added)+1)
	copy(added, md.added)
	added[len(added)-1] = make([]string, len(kv))
	copy(added[len(added)-1], kv)
	kvCopy := make([]string, 0, len(kv))
	for i := 0; i < len(kv); i += 2 {
		kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
	}
	added[len(added)-1] = kvCopy
	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
}
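Reviewer note: AppendToOutgoingContext now lowercases keys at append time (previously the keys were lowercased later, when the outgoing context was converted to MD), so observable behavior for callers is unchanged:

	ctx := metadata.AppendToOutgoingContext(context.Background(), "X-Request-ID", "42")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("x-request-id")) // [42]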
38  vendor/google.golang.org/grpc/picker_wrapper.go generated vendored
@ -36,6 +36,7 @@ import (
|
|||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
idle bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
}
|
||||
|
@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper {
|
|||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
|
||||
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
if pw.done || pw.idle {
|
||||
// There is a small window where a picker update from the LB policy can
|
||||
// race with the channel going to idle mode. If the picker is idle here,
|
||||
// it is because the channel asked it to do so, and therefore it is sage
|
||||
// to ignore the update from the LB policy.
|
||||
pw.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
@ -63,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
|||
// - wraps the done function in the passed in result to increment the calls
|
||||
// failed or calls succeeded channelz counter before invoking the actual
|
||||
// done function.
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
|
||||
acw.mu.Lock()
|
||||
ac := acw.ac
|
||||
acw.mu.Unlock()
|
||||
func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
|
||||
ac := acbw.ac
|
||||
ac.incrCallsStarted()
|
||||
done := result.Done
|
||||
result.Done = func(b balancer.DoneInfo) {
|
||||
|
@ -152,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
|||
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
|
||||
}
|
||||
|
||||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
|
||||
continue
|
||||
}
|
||||
if t := acw.getAddrConn().getReadyTransport(); t != nil {
|
||||
if t := acbw.ac.getReadyTransport(); t != nil {
|
||||
if channelz.IsOn() {
|
||||
doneChannelzWrapper(acw, &pickResult)
|
||||
doneChannelzWrapper(acbw, &pickResult)
|
||||
return t, pickResult, nil
|
||||
}
|
||||
return t, pickResult, nil
|
||||
|
@ -187,6 +190,25 @@ func (pw *pickerWrapper) close() {
|
|||
close(pw.blockingCh)
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) enterIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.idle = true
|
||||
}
|
||||
|
||||
func (pw *pickerWrapper) exitIdleMode() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.idle = false
|
||||
}
|
||||
|
||||
// dropError is a wrapper error that indicates the LB policy wishes to drop the
|
||||
// RPC and not retry it.
|
||||
type dropError struct {
|
||||
|
|
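Reviewer note: the new idle flag closes a race between a late picker update from the LB policy and the channel entering idle mode. The guard reduces to a mutex-protected check; a minimal sketch with illustrative names:

	type pickerGate struct {
		mu   sync.Mutex
		done bool
		idle bool
	}

	// update drops the new picker if the wrapper is closed or parked idle,
	// mirroring pickerWrapper.updatePicker.
	func (g *pickerGate) update(apply func()) {
		g.mu.Lock()
		defer g.mu.Unlock()
		if g.done || g.idle {
			return
		}
		apply()
	}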
52  vendor/google.golang.org/grpc/pickfirst.go generated vendored
@ -19,11 +19,15 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
|
@ -43,10 +47,28 @@ func (*pickfirstBuilder) Name() string {
|
|||
return PickFirstBalancerName
|
||||
}
|
||||
|
||||
type pfConfig struct {
|
||||
serviceconfig.LoadBalancingConfig `json:"-"`
|
||||
|
||||
// If set to true, instructs the LB policy to shuffle the order of the list
|
||||
// of addresses received from the name resolver before attempting to
|
||||
// connect to them.
|
||||
ShuffleAddressList bool `json:"shuffleAddressList"`
|
||||
}
|
||||
|
||||
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
|
||||
cfg := &pfConfig{}
|
||||
if err := json.Unmarshal(js, cfg); err != nil {
|
||||
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
|
||||
}
|
||||
return cfg, nil
|
||||
}
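The new knob is driven entirely by the service config. A hedged sketch of what ParseConfig above accepts, written as it would appear inside the grpc package itself (pickfirstBuilder and pfConfig are unexported; imports assumed: encoding/json, fmt, log). In a full service config this object sits under loadBalancingConfig as the pick_first entry.

raw := json.RawMessage(`{"shuffleAddressList": true}`)
cfg, err := (&pickfirstBuilder{}).ParseConfig(raw)
if err != nil {
	log.Fatalf("unexpected parse error: %v", err)
}
// The flag is only acted on when the envconfig.PickFirstLBConfig guard is enabled.
fmt.Println(cfg.(*pfConfig).ShuffleAddressList) // true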
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
state connectivity.State
|
||||
cc balancer.ClientConn
|
||||
subConn balancer.SubConn
|
||||
cfg *pfConfig
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
|
@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) {
|
|||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
|
||||
if len(state.ResolverState.Addresses) == 0 {
|
||||
addrs := state.ResolverState.Addresses
|
||||
if len(addrs) == 0 {
|
||||
// The resolver reported an empty address list. Treat it like an error by
|
||||
// calling b.ResolverError.
|
||||
if b.subConn != nil {
|
||||
|
@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
|||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
if state.BalancerConfig != nil {
|
||||
cfg, ok := state.BalancerConfig.(*pfConfig)
|
||||
if !ok {
|
||||
return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
|
||||
}
|
||||
b.cfg = cfg
|
||||
}
|
||||
|
||||
if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList {
|
||||
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
|
||||
}
|
||||
if b.subConn != nil {
|
||||
b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses)
|
||||
b.cc.UpdateAddresses(b.subConn, addrs)
|
||||
return nil
|
||||
}
|
||||
|
||||
subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
||||
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
if logger.V(2) {
|
||||
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
|
@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|||
}
|
||||
return
|
||||
}
|
||||
b.state = state.ConnectivityState
|
||||
if state.ConnectivityState == connectivity.Shutdown {
|
||||
b.subConn = nil
|
||||
return
|
||||
|
@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|||
Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
|
||||
})
|
||||
case connectivity.Connecting:
|
||||
if b.state == connectivity.TransientFailure {
|
||||
// We stay in TransientFailure until we are Ready. See A62.
|
||||
return
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: state.ConnectivityState,
|
||||
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||||
})
|
||||
case connectivity.Idle:
|
||||
if b.state == connectivity.TransientFailure {
|
||||
// We stay in TransientFailure until we are Ready. Also kick the
|
||||
// subConn out of Idle into Connecting. See A62.
|
||||
b.subConn.Connect()
|
||||
return
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: state.ConnectivityState,
|
||||
Picker: &idlePicker{subConn: subConn},
|
||||
|
@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
|
|||
Picker: &picker{err: state.ConnectionError},
|
||||
})
|
||||
}
|
||||
b.state = state.ConnectivityState
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) Close() {
|
||||
|
|
36 changes: vendor/google.golang.org/grpc/resolver/resolver.go (generated, vendored)
|
@ -22,13 +22,13 @@ package resolver
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
|
@ -41,8 +41,9 @@ var (
|
|||
|
||||
// TODO(bar) install dns resolver in init(){}.
|
||||
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will be
|
||||
// used as the scheme registered with this builder.
|
||||
// Register registers the resolver builder to the resolver map. b.Scheme will
|
||||
// be used as the scheme registered with this builder. The registry is case
|
||||
// sensitive, and schemes should not contain any uppercase characters.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Resolvers are
|
||||
|
@ -123,7 +124,7 @@ type Address struct {
|
|||
Attributes *attributes.Attributes
|
||||
|
||||
// BalancerAttributes contains arbitrary data about this address intended
|
||||
// for consumption by the LB policy. These attribes do not affect SubConn
|
||||
// for consumption by the LB policy. These attributes do not affect SubConn
|
||||
// creation, connection establishment, handshaking, etc.
|
||||
BalancerAttributes *attributes.Attributes
|
||||
|
||||
|
@ -150,7 +151,17 @@ func (a Address) Equal(o Address) bool {
|
|||
|
||||
// String returns JSON formatted string representation of the address.
|
||||
func (a Address) String() string {
|
||||
return pretty.ToJSON(a)
|
||||
var sb strings.Builder
|
||||
sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
|
||||
sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
|
||||
if a.Attributes != nil {
|
||||
sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
|
||||
}
|
||||
if a.BalancerAttributes != nil {
|
||||
sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
|
||||
}
|
||||
sb.WriteString("}")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// BuildOptions includes additional information for the builder to create
|
||||
|
@ -203,6 +214,15 @@ type State struct {
|
|||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// UpdateState updates the state of the ClientConn appropriately.
|
||||
//
|
||||
// If an error is returned, the resolver should try to resolve the
|
||||
// target again. The resolver should use a backoff timer to prevent
|
||||
// overloading the server with requests. If a resolver is certain that
|
||||
// reresolving will not change the result, e.g. because it is
|
||||
// a watch-based resolver, returned errors can be ignored.
|
||||
//
|
||||
// If the resolved State is the same as the last reported one, calling
|
||||
// UpdateState can be omitted.
|
||||
UpdateState(State) error
|
||||
// ReportError notifies the ClientConn that the Resolver encountered an
|
||||
// error. The ClientConn will notify the load balancer and begin calling
|
||||
|
@ -280,8 +300,10 @@ type Builder interface {
|
|||
// gRPC dial calls Build synchronously, and fails if the returned error is
|
||||
// not nil.
|
||||
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
|
||||
// Scheme returns the scheme supported by this resolver.
|
||||
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
// Scheme returns the scheme supported by this resolver. Scheme is defined
|
||||
// at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
|
||||
// string should not contain uppercase characters, as they will not match
|
||||
// the parsed target's scheme as defined in RFC 3986.
|
||||
Scheme() string
|
||||
}
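A minimal illustrative Builder that follows the lowercase-scheme rule spelled out above; the resolver is stubbed out and the address is a placeholder.

package examplescheme

import "google.golang.org/grpc/resolver"

type builder struct{}

// Scheme is all lowercase, per the Register and Scheme documentation above.
func (builder) Scheme() string { return "example" }

func (builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	// Report a fixed address once; a real resolver would watch for updates.
	err := cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return r, err
}

type staticResolver struct{ cc resolver.ClientConn }

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

// Registration must happen at init time, as the NOTE above requires.
func init() { resolver.Register(builder{}) }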
|
||||
|
||||
|
|
189 changes: vendor/google.golang.org/grpc/resolver_conn_wrapper.go (generated, vendored)
|
@ -19,11 +19,11 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
|
@ -31,129 +31,192 @@ import (
|
|||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// resolverStateUpdater wraps the single method used by ccResolverWrapper to
|
||||
// report a state update from the actual resolver implementation.
|
||||
type resolverStateUpdater interface {
|
||||
updateResolverState(s resolver.State, err error) error
|
||||
}
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
cc *ClientConn
|
||||
resolverMu sync.Mutex
|
||||
resolver resolver.Resolver
|
||||
done *grpcsync.Event
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc resolverStateUpdater
|
||||
channelzID *channelz.Identifier
|
||||
ignoreServiceConfig bool
|
||||
opts ccResolverWrapperOpts
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
|
||||
serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
|
||||
|
||||
// All incoming (resolver --> gRPC) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled on the serializer.
|
||||
// Fields accessed *only* in these serializer callbacks, can therefore be
|
||||
// accessed without a mutex.
|
||||
curState resolver.State
|
||||
|
||||
incomingMu sync.Mutex // Synchronizes all the incoming calls.
|
||||
// mu guards access to the below fields.
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
resolver resolver.Resolver // Accessed only from outgoing calls.
|
||||
}
|
||||
|
||||
// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
|
||||
// ccResolverWrapper.
|
||||
type ccResolverWrapperOpts struct {
|
||||
target resolver.Target // User specified dial target to resolve.
|
||||
builder resolver.Builder // Resolver builder to use.
|
||||
bOpts resolver.BuildOptions // Resolver build options to use.
|
||||
channelzID *channelz.Identifier // Channelz identifier for the channel.
|
||||
}
|
||||
|
||||
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
|
||||
// returns a ccResolverWrapper object which wraps the newly built resolver.
|
||||
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
|
||||
func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
done: grpcsync.NewEvent(),
|
||||
channelzID: opts.channelzID,
|
||||
ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
|
||||
opts: opts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
}
|
||||
rbo := resolver.BuildOptions{
|
||||
DisableServiceConfig: cc.dopts.disableServiceConfig,
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
}
|
||||
|
||||
var err error
|
||||
// We need to hold the lock here while we assign to the ccr.resolver field
|
||||
// to guard against a data race caused by the following code path,
|
||||
// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
|
||||
// accessing ccr.resolver which is being assigned here.
|
||||
ccr.resolverMu.Lock()
|
||||
defer ccr.resolverMu.Unlock()
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
|
||||
// Cannot hold the lock at build time because the resolver can send an
|
||||
// update or error inline and these incoming calls grab the lock to schedule
|
||||
// a callback in the serializer.
|
||||
r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Any error reported by the resolver at build time that leads to a
|
||||
// re-resolution request from the balancer is dropped by grpc until we
|
||||
// return from this function. So, we don't have to handle pending resolveNow
|
||||
// requests here.
|
||||
ccr.mu.Lock()
|
||||
ccr.resolver = r
|
||||
ccr.mu.Unlock()
|
||||
|
||||
return ccr, nil
|
||||
}
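The wrapper above leans on grpcsync.CallbackSerializer (an internal type) so every resolver-to-gRPC call runs one at a time. A minimal sketch of that idea under assumed names; the buffer size and shutdown handling are simplified relative to the real implementation.

package serializersketch

import "context"

// serializer runs scheduled callbacks one at a time, in order, until ctx is canceled.
type serializer struct {
	ctx       context.Context
	callbacks chan func(context.Context)
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{ctx: ctx, callbacks: make(chan func(context.Context), 64)}
	go s.run()
	return s
}

func (s *serializer) run() {
	for {
		select {
		case <-s.ctx.Done():
			return
		case cb := <-s.callbacks:
			cb(s.ctx) // callbacks never overlap, so fields they touch need no mutex
		}
	}
}

// Schedule reports false once the serializer has been shut down.
func (s *serializer) Schedule(f func(context.Context)) bool {
	select {
	case <-s.ctx.Done():
		return false
	case s.callbacks <- f:
		return true
	}
}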
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.resolverMu.Lock()
|
||||
if !ccr.done.HasFired() {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
ccr.mu.Lock()
|
||||
defer ccr.mu.Unlock()
|
||||
|
||||
// ccr.resolver field is set only after the call to Build() returns. But in
|
||||
// the process of building, the resolver may send an error update which when
|
||||
// propagated to the balancer may result in a re-resolution request.
|
||||
if ccr.closed || ccr.resolver == nil {
|
||||
return
|
||||
}
|
||||
ccr.resolverMu.Unlock()
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolverMu.Lock()
|
||||
ccr.resolver.Close()
|
||||
ccr.done.Fire()
|
||||
ccr.resolverMu.Unlock()
|
||||
ccr.mu.Lock()
|
||||
if ccr.closed {
|
||||
ccr.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return nil
|
||||
channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
|
||||
|
||||
// Close the serializer to ensure that no more calls from the resolver are
|
||||
// handled, before actually closing the resolver.
|
||||
ccr.serializerCancel()
|
||||
ccr.closed = true
|
||||
r := ccr.resolver
|
||||
ccr.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish.
|
||||
<-ccr.serializer.Done
|
||||
|
||||
// Spawn a goroutine to close the resolver (since it may block trying to
|
||||
// cleanup all allocated resources) and return early.
|
||||
go r.Close()
|
||||
}
|
||||
|
||||
// serializerScheduleLocked is a convenience method to schedule a function to be
|
||||
// run on the serializer while holding ccr.mu.
|
||||
func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
|
||||
ccr.mu.Lock()
|
||||
ccr.serializer.Schedule(f)
|
||||
ccr.mu.Unlock()
|
||||
}
|
||||
|
||||
// UpdateState is called by resolver implementations to report new state to gRPC
|
||||
// which includes addresses and service config.
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
|
||||
errCh := make(chan error, 1)
|
||||
ok := ccr.serializer.Schedule(func(context.Context) {
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
ccr.curState = s
|
||||
if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
|
||||
return balancer.ErrBadResolverState
|
||||
errCh <- balancer.ErrBadResolverState
|
||||
return
|
||||
}
|
||||
errCh <- nil
|
||||
})
|
||||
if !ok {
|
||||
// The only time when Schedule() fails to add the callback to the
|
||||
// serializer is when the serializer is closed, and this happens only
|
||||
// when the resolver wrapper is closed.
|
||||
return nil
|
||||
}
|
||||
return <-errCh
|
||||
}
|
||||
|
||||
// ReportError is called by resolver implementations to report errors
|
||||
// encountered during name resolution to gRPC.
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
|
||||
ccr.cc.updateResolverState(resolver.State{}, err)
|
||||
})
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||
// NewAddress is called by the resolver implementation to send addresses to
|
||||
// gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
ccr.incomingMu.Lock()
|
||||
defer ccr.incomingMu.Unlock()
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||
if ccr.cc.dopts.disableServiceConfig {
|
||||
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config")
|
||||
ccr.serializerScheduleLocked(func(_ context.Context) {
|
||||
channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
|
||||
if ccr.ignoreServiceConfig {
|
||||
channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
return
|
||||
}
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.cc.updateResolverState(ccr.curState, nil)
|
||||
})
|
||||
}
|
||||
|
||||
// ParseServiceConfig is called by resolver implementations to parse a JSON
|
||||
// representation of the service config.
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON)
|
||||
}
|
||||
|
||||
// addChannelzTraceEvent adds a channelz trace event containing the new
|
||||
// state received from resolver implementations.
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
var oldSC, newSC *ServiceConfig
|
||||
|
@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
|||
} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
|
||||
updates = append(updates, "resolver returned new addresses")
|
||||
}
|
||||
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
|
||||
}
|
||||
|
|
41 changes: vendor/google.golang.org/grpc/rpc_util.go (generated, vendored)
|
@ -159,6 +159,7 @@ type callInfo struct {
|
|||
contentSubtype string
|
||||
codec baseCodec
|
||||
maxRetryRPCBufferSize int
|
||||
onFinish []func(err error)
|
||||
}
|
||||
|
||||
func defaultCallInfo() *callInfo {
|
||||
|
@ -295,6 +296,41 @@ func (o FailFastCallOption) before(c *callInfo) error {
|
|||
}
|
||||
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// OnFinish returns a CallOption that configures a callback to be called when
|
||||
// the call completes. The error passed to the callback is the status of the
|
||||
// RPC, and may be nil. The onFinish callback provided will only be called once
|
||||
// by gRPC. This is mainly intended for streaming interceptors, so they can be
|
||||
// notified when the RPC completes along with information about the status of
|
||||
// the RPC.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func OnFinish(onFinish func(err error)) CallOption {
|
||||
return OnFinishCallOption{
|
||||
OnFinish: onFinish,
|
||||
}
|
||||
}
|
||||
|
||||
// OnFinishCallOption is CallOption that indicates a callback to be called when
|
||||
// the call completes.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type OnFinishCallOption struct {
|
||||
OnFinish func(error)
|
||||
}
|
||||
|
||||
func (o OnFinishCallOption) before(c *callInfo) error {
|
||||
c.onFinish = append(c.onFinish, o.OnFinish)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
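A hypothetical client call using the new option; client, ctx, and pb.EchoRequest are placeholders for generated code, and grpc and log imports are assumed.

resp, err := client.Echo(ctx, &pb.EchoRequest{Message: "hi"},
	grpc.OnFinish(func(err error) {
		// Invoked exactly once with the RPC's final status (nil on success).
		log.Printf("Echo finished: %v", err)
	}))
if err != nil {
	log.Printf("Echo failed immediately: %v", err)
}
_ = resp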
|
||||
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can receive. If this is not set, gRPC uses the default
|
||||
// 4MB.
|
||||
|
@ -663,6 +699,7 @@ func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time)
|
|||
Data: data,
|
||||
Length: len(data),
|
||||
WireLength: len(payload) + headerLen,
|
||||
CompressedLength: len(payload),
|
||||
SentTime: t,
|
||||
}
|
||||
}
|
||||
|
@ -684,7 +721,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
|
|||
}
|
||||
|
||||
type payloadInfo struct {
|
||||
wireLength int // The compressed length got from wire.
|
||||
compressedLength int // The compressed length got from wire.
|
||||
uncompressedBytes []byte
|
||||
}
|
||||
|
||||
|
@ -694,7 +731,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
|||
return nil, err
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.wireLength = len(d)
|
||||
payInfo.compressedLength = len(d)
|
||||
}
|
||||
|
||||
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
|
||||
|
|
250 changes: vendor/google.golang.org/grpc/server.go (generated, vendored)
|
@ -43,8 +43,8 @@ import (
|
|||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/binarylog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
@ -74,10 +74,10 @@ func init() {
|
|||
srv.drainServerTransports(addr)
|
||||
}
|
||||
internal.AddGlobalServerOptions = func(opt ...ServerOption) {
|
||||
extraServerOptions = append(extraServerOptions, opt...)
|
||||
globalServerOptions = append(globalServerOptions, opt...)
|
||||
}
|
||||
internal.ClearGlobalServerOptions = func() {
|
||||
extraServerOptions = nil
|
||||
globalServerOptions = nil
|
||||
}
|
||||
internal.BinaryLogger = binaryLogger
|
||||
internal.JoinServerOptions = newJoinServerOption
|
||||
|
@ -115,12 +115,6 @@ type serviceInfo struct {
|
|||
mdata interface{}
|
||||
}
|
||||
|
||||
type serverWorkerData struct {
|
||||
st transport.ServerTransport
|
||||
wg *sync.WaitGroup
|
||||
stream *transport.Stream
|
||||
}
|
||||
|
||||
// Server is a gRPC server to serve RPC requests.
|
||||
type Server struct {
|
||||
opts serverOptions
|
||||
|
@ -145,7 +139,7 @@ type Server struct {
|
|||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
|
||||
serverWorkerChannels []chan *serverWorkerData
|
||||
serverWorkerChannel chan func()
|
||||
}
|
||||
|
||||
type serverOptions struct {
|
||||
|
@ -177,13 +171,14 @@ type serverOptions struct {
|
|||
}
|
||||
|
||||
var defaultServerOptions = serverOptions{
|
||||
maxConcurrentStreams: math.MaxUint32,
|
||||
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
|
||||
maxSendMessageSize: defaultServerMaxSendMessageSize,
|
||||
connectionTimeout: 120 * time.Second,
|
||||
writeBufferSize: defaultWriteBufSize,
|
||||
readBufferSize: defaultReadBufSize,
|
||||
}
|
||||
var extraServerOptions []ServerOption
|
||||
var globalServerOptions []ServerOption
|
||||
|
||||
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
|
||||
type ServerOption interface {
|
||||
|
@ -387,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption {
|
|||
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
|
||||
// of concurrent streams to each ServerTransport.
|
||||
func MaxConcurrentStreams(n uint32) ServerOption {
|
||||
if n == 0 {
|
||||
n = math.MaxUint32
|
||||
}
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.maxConcurrentStreams = n
|
||||
})
|
||||
|
@ -560,47 +558,40 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
|
|||
const serverWorkerResetThreshold = 1 << 16
|
||||
|
||||
// serverWorkers blocks on a *transport.Stream channel forever and waits for
|
||||
// data to be fed by serveStreams. This allows different requests to be
|
||||
// data to be fed by serveStreams. This allows multiple requests to be
|
||||
// processed by the same goroutine, removing the need for expensive stack
|
||||
// re-allocations (see the runtime.morestack problem [1]).
|
||||
//
|
||||
// [1] https://github.com/golang/go/issues/18138
|
||||
func (s *Server) serverWorker(ch chan *serverWorkerData) {
|
||||
// To make sure all server workers don't reset at the same time, choose a
|
||||
// random number of iterations before resetting.
|
||||
threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
|
||||
for completed := 0; completed < threshold; completed++ {
|
||||
data, ok := <-ch
|
||||
func (s *Server) serverWorker() {
|
||||
for completed := 0; completed < serverWorkerResetThreshold; completed++ {
|
||||
f, ok := <-s.serverWorkerChannel
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
|
||||
data.wg.Done()
|
||||
f()
|
||||
}
|
||||
go s.serverWorker(ch)
|
||||
go s.serverWorker()
|
||||
}
|
||||
|
||||
// initServerWorkers creates worker goroutines and channels to process incoming
|
||||
// initServerWorkers creates worker goroutines and a channel to process incoming
|
||||
// connections to reduce the time spent overall on runtime.morestack.
|
||||
func (s *Server) initServerWorkers() {
|
||||
s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
|
||||
s.serverWorkerChannel = make(chan func())
|
||||
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
|
||||
s.serverWorkerChannels[i] = make(chan *serverWorkerData)
|
||||
go s.serverWorker(s.serverWorkerChannels[i])
|
||||
go s.serverWorker()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) stopServerWorkers() {
|
||||
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
|
||||
close(s.serverWorkerChannels[i])
|
||||
}
|
||||
close(s.serverWorkerChannel)
|
||||
}
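The reworked worker scheme boils down to one shared channel of closures plus a non-blocking fallback. A condensed, self-contained sketch under assumed names:

package workerpool

// start launches n long-lived workers that drain handlers from one shared channel.
func start(n int) (dispatch func(func()), stop func()) {
	ch := make(chan func())
	for i := 0; i < n; i++ {
		go func() {
			for f := range ch {
				f() // many requests reuse one goroutine, avoiding runtime.morestack churn
			}
		}()
	}
	dispatch = func(f func()) {
		select {
		case ch <- f:
			// A parked worker picked the handler up.
		default:
			go f() // all workers busy: fall back to a fresh goroutine
		}
	}
	stop = func() { close(ch) }
	return dispatch, stop
}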
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
opts := defaultServerOptions
|
||||
for _, o := range extraServerOptions {
|
||||
for _, o := range globalServerOptions {
|
||||
o.apply(&opts)
|
||||
}
|
||||
for _, o := range opt {
|
||||
|
@ -897,7 +888,7 @@ func (s *Server) drainServerTransports(addr string) {
|
|||
s.mu.Lock()
|
||||
conns := s.conns[addr]
|
||||
for st := range conns {
|
||||
st.Drain()
|
||||
st.Drain("")
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
@ -945,26 +936,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
|
|||
defer st.Close(errors.New("finished serving streams for the server transport"))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
var roundRobinCounter uint32
|
||||
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
wg.Add(1)
|
||||
if s.opts.numServerWorkers > 0 {
|
||||
data := &serverWorkerData{st: st, wg: &wg, stream: stream}
|
||||
select {
|
||||
case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
|
||||
default:
|
||||
// If all stream workers are busy, fallback to the default code path.
|
||||
go func() {
|
||||
s.handleStream(st, stream, s.traceInfo(st, stream))
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
|
||||
streamQuota.acquire()
|
||||
f := func() {
|
||||
defer streamQuota.release()
|
||||
defer wg.Done()
|
||||
s.handleStream(st, stream, s.traceInfo(st, stream))
|
||||
}()
|
||||
}
|
||||
|
||||
if s.opts.numServerWorkers > 0 {
|
||||
select {
|
||||
case s.serverWorkerChannel <- f:
|
||||
return
|
||||
default:
|
||||
// If all stream workers are busy, fallback to the default code path.
|
||||
}
|
||||
}
|
||||
go f()
|
||||
}, func(ctx context.Context, method string) context.Context {
|
||||
if !EnableTracing {
|
||||
return ctx
|
||||
|
@ -1053,7 +1044,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
|
|||
if s.drain {
|
||||
// Transport added after we drained our existing conns: drain it
|
||||
// immediately.
|
||||
st.Drain()
|
||||
st.Drain("")
|
||||
}
|
||||
|
||||
if s.conns[addr] == nil {
|
||||
|
@ -1252,7 +1243,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(logEntry)
|
||||
binlog.Log(ctx, logEntry)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1263,6 +1254,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
var comp, decomp encoding.Compressor
|
||||
var cp Compressor
|
||||
var dc Decompressor
|
||||
var sendCompressorName string
|
||||
|
||||
// If dc is set and matches the stream's compression, use it. Otherwise, try
|
||||
// to find a matching registered compressor for decomp.
|
||||
|
@ -1283,12 +1275,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
if s.opts.cp != nil {
|
||||
cp = s.opts.cp
|
||||
stream.SetSendCompress(cp.Type())
|
||||
sendCompressorName = cp.Type()
|
||||
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
||||
// Legacy compressor not specified; attempt to respond with same encoding.
|
||||
comp = encoding.GetCompressor(rc)
|
||||
if comp != nil {
|
||||
stream.SetSendCompress(rc)
|
||||
sendCompressorName = comp.Name()
|
||||
}
|
||||
}
|
||||
|
||||
if sendCompressorName != "" {
|
||||
if err := stream.SetSendCompress(sendCompressorName); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1314,9 +1312,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
sh.HandleRPC(stream.Context(), &stats.InPayload{
|
||||
RecvTime: time.Now(),
|
||||
Payload: v,
|
||||
WireLength: payInfo.wireLength + headerLen,
|
||||
Data: d,
|
||||
Length: len(d),
|
||||
WireLength: payInfo.compressedLength + headerLen,
|
||||
CompressedLength: payInfo.compressedLength,
|
||||
Data: d,
|
||||
})
|
||||
}
|
||||
if len(binlogs) != 0 {
|
||||
|
@ -1324,7 +1323,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
Message: d,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(cm)
|
||||
binlog.Log(stream.Context(), cm)
|
||||
}
|
||||
}
|
||||
if trInfo != nil {
|
||||
|
@ -1357,7 +1356,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
Header: h,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(sh)
|
||||
binlog.Log(stream.Context(), sh)
|
||||
}
|
||||
}
|
||||
st := &binarylog.ServerTrailer{
|
||||
|
@ -1365,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
Err: appErr,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(st)
|
||||
binlog.Log(stream.Context(), st)
|
||||
}
|
||||
}
|
||||
return appErr
|
||||
|
@ -1375,6 +1374,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
}
|
||||
opts := &transport.Options{Last: true}
|
||||
|
||||
// Server handler could have set new compressor by calling SetSendCompressor.
|
||||
// In case it is set, we need to use it for compressing outbound message.
|
||||
if stream.SendCompress() != sendCompressorName {
|
||||
comp = encoding.GetCompressor(stream.SendCompress())
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
|
||||
if err == io.EOF {
|
||||
// The entire stream is done (for unary RPC only).
|
||||
|
@ -1402,8 +1406,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
Err: appErr,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(sh)
|
||||
binlog.Log(st)
|
||||
binlog.Log(stream.Context(), sh)
|
||||
binlog.Log(stream.Context(), st)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
@ -1417,8 +1421,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
Message: reply,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(sh)
|
||||
binlog.Log(sm)
|
||||
binlog.Log(stream.Context(), sh)
|
||||
binlog.Log(stream.Context(), sm)
|
||||
}
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
|
@ -1430,17 +1434,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
// TODO: Should we be logging if writing status failed here, like above?
|
||||
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
|
||||
// error or allow the stats handler to see it?
|
||||
err = t.WriteStatus(stream, statusOK)
|
||||
if len(binlogs) != 0 {
|
||||
st := &binarylog.ServerTrailer{
|
||||
Trailer: stream.Trailer(),
|
||||
Err: appErr,
|
||||
}
|
||||
for _, binlog := range binlogs {
|
||||
binlog.Log(st)
|
||||
binlog.Log(stream.Context(), st)
|
||||
}
|
||||
}
|
||||
return err
|
||||
return t.WriteStatus(stream, statusOK)
|
||||
}
|
||||
|
||||
// chainStreamServerInterceptors chains all stream server interceptors into one.
|
||||
|
@ -1574,7 +1577,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(logEntry)
|
||||
binlog.Log(stream.Context(), logEntry)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1597,12 +1600,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
|
||||
if s.opts.cp != nil {
|
||||
ss.cp = s.opts.cp
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
ss.sendCompressorName = s.opts.cp.Type()
|
||||
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
|
||||
// Legacy compressor not specified; attempt to respond with same encoding.
|
||||
ss.comp = encoding.GetCompressor(rc)
|
||||
if ss.comp != nil {
|
||||
stream.SetSendCompress(rc)
|
||||
ss.sendCompressorName = rc
|
||||
}
|
||||
}
|
||||
|
||||
if ss.sendCompressorName != "" {
|
||||
if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1640,16 +1649,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
ss.trInfo.tr.SetError()
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
t.WriteStatus(ss.s, appStatus)
|
||||
if len(ss.binlogs) != 0 {
|
||||
st := &binarylog.ServerTrailer{
|
||||
Trailer: ss.s.Trailer(),
|
||||
Err: appErr,
|
||||
}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(st)
|
||||
binlog.Log(stream.Context(), st)
|
||||
}
|
||||
}
|
||||
t.WriteStatus(ss.s, appStatus)
|
||||
// TODO: Should we log an error from WriteStatus here and below?
|
||||
return appErr
|
||||
}
|
||||
|
@ -1658,17 +1667,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||
ss.trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
err = t.WriteStatus(ss.s, statusOK)
|
||||
if len(ss.binlogs) != 0 {
|
||||
st := &binarylog.ServerTrailer{
|
||||
Trailer: ss.s.Trailer(),
|
||||
Err: appErr,
|
||||
}
|
||||
for _, binlog := range ss.binlogs {
|
||||
binlog.Log(st)
|
||||
binlog.Log(stream.Context(), st)
|
||||
}
|
||||
}
|
||||
return err
|
||||
return t.WriteStatus(ss.s, statusOK)
|
||||
}
|
||||
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||
|
@ -1846,7 +1854,7 @@ func (s *Server) GracefulStop() {
|
|||
if !s.drain {
|
||||
for _, conns := range s.conns {
|
||||
for st := range conns {
|
||||
st.Drain()
|
||||
st.Drain("graceful_stop")
|
||||
}
|
||||
}
|
||||
s.drain = true
|
||||
|
@ -1935,6 +1943,60 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// SetSendCompressor sets a compressor for outbound messages from the server.
|
||||
// It must not be called after any event that causes headers to be sent
|
||||
// (see ServerStream.SetHeader for the complete list). Provided compressor is
|
||||
// used when below conditions are met:
|
||||
//
|
||||
// - compressor is registered via encoding.RegisterCompressor
|
||||
// - compressor name must exist in the client advertised compressor names
|
||||
// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
|
||||
// get client supported compressor names.
|
||||
//
|
||||
// The context provided must be the context passed to the server's handler.
|
||||
// It must be noted that compressor name encoding.Identity disables the
|
||||
// outbound compression.
|
||||
// By default, server messages will be sent using the same compressor with
|
||||
// which request messages were sent.
|
||||
//
|
||||
// It is not safe to call SetSendCompressor concurrently with SendHeader and
|
||||
// SendMsg.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func SetSendCompressor(ctx context.Context, name string) error {
|
||||
stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
|
||||
if !ok || stream == nil {
|
||||
return fmt.Errorf("failed to fetch the stream from the given context")
|
||||
}
|
||||
|
||||
if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
|
||||
return fmt.Errorf("unable to set send compressor: %w", err)
|
||||
}
|
||||
|
||||
return stream.SetSendCompress(name)
|
||||
}
|
||||
|
||||
// ClientSupportedCompressors returns compressor names advertised by the client
|
||||
// via grpc-accept-encoding header.
|
||||
//
|
||||
// The context provided must be the context passed to the server's handler.
|
||||
//
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
|
||||
stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
|
||||
if !ok || stream == nil {
|
||||
return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
|
||||
}
|
||||
|
||||
return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
|
||||
}
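A hypothetical unary handler combining the two experimental helpers above. The server type, pb.Msg, and its Body field are placeholders; the gzip compressor is assumed to be registered via the google.golang.org/grpc/encoding/gzip import.

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func (s *server) Echo(ctx context.Context, in *pb.Msg) (*pb.Msg, error) {
	// Only switch the response compressor if the client advertised gzip.
	if names, err := grpc.ClientSupportedCompressors(ctx); err == nil {
		for _, n := range names {
			if n == "gzip" {
				if err := grpc.SetSendCompressor(ctx, "gzip"); err != nil {
					return nil, err
				}
				break
			}
		}
	}
	return &pb.Msg{Body: in.GetBody()}, nil
}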
|
||||
|
||||
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
|
||||
// When called more than once, all the provided metadata will be merged.
|
||||
//
|
||||
|
@ -1969,3 +2031,51 @@ type channelzServer struct {
|
|||
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
|
||||
return c.s.channelzMetric()
|
||||
}
|
||||
|
||||
// validateSendCompressor returns an error when given compressor name cannot be
|
||||
// handled by the server or the client based on the advertised compressors.
|
||||
func validateSendCompressor(name, clientCompressors string) error {
|
||||
if name == encoding.Identity {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !grpcutil.IsCompressorNameRegistered(name) {
|
||||
return fmt.Errorf("compressor not registered %q", name)
|
||||
}
|
||||
|
||||
for _, c := range strings.Split(clientCompressors, ",") {
|
||||
if c == name {
|
||||
return nil // found match
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("client does not support compressor %q", name)
|
||||
}
|
||||
|
||||
// atomicSemaphore implements a blocking, counting semaphore. acquire should be
|
||||
// called synchronously; release may be called asynchronously.
|
||||
type atomicSemaphore struct {
|
||||
n int64
|
||||
wait chan struct{}
|
||||
}
|
||||
|
||||
func (q *atomicSemaphore) acquire() {
|
||||
if atomic.AddInt64(&q.n, -1) < 0 {
|
||||
// We ran out of quota. Block until a release happens.
|
||||
<-q.wait
|
||||
}
|
||||
}
|
||||
|
||||
func (q *atomicSemaphore) release() {
|
||||
// N.B. the "<= 0" check below should allow for this to work with multiple
|
||||
// concurrent calls to acquire, but also note that with synchronous calls to
|
||||
// acquire, as our system does, n will never be less than -1. There are
|
||||
// fairness issues (queuing) to consider if this was to be generalized.
|
||||
if atomic.AddInt64(&q.n, 1) <= 0 {
|
||||
// An acquire was waiting on us. Unblock it.
|
||||
q.wait <- struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func newHandlerQuota(n uint32) *atomicSemaphore {
|
||||
return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)}
|
||||
}
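Usage of the semaphore above follows the serveStreams pattern: acquire before dispatching a handler, release when it finishes. The handler slice and the cap of 8 are illustrative.

sem := newHandlerQuota(8) // cap at 8 concurrent handlers
for _, w := range work {  // work is a placeholder []func()
	w := w
	sem.acquire() // blocks while 8 handlers are already in flight
	go func() {
		defer sem.release()
		w()
	}()
}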
|
||||
|
|
75 changes: vendor/google.golang.org/grpc/service_config.go (generated, vendored)
|
@ -23,8 +23,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -106,8 +104,8 @@ type healthCheckConfig struct {
|
|||
|
||||
type jsonRetryPolicy struct {
|
||||
MaxAttempts int
|
||||
InitialBackoff string
|
||||
MaxBackoff string
|
||||
InitialBackoff internalserviceconfig.Duration
|
||||
MaxBackoff internalserviceconfig.Duration
|
||||
BackoffMultiplier float64
|
||||
RetryableStatusCodes []codes.Code
|
||||
}
|
||||
|
@ -129,50 +127,6 @@ type retryThrottlingPolicy struct {
|
|||
TokenRatio float64
|
||||
}
|
||||
|
||||
func parseDuration(s *string) (*time.Duration, error) {
|
||||
if s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !strings.HasSuffix(*s, "s") {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
||||
if len(ss) > 2 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
// hasDigits is set if either the whole or fractional part of the number is
|
||||
// present, since both are optional but one is required.
|
||||
hasDigits := false
|
||||
var d time.Duration
|
||||
if len(ss[0]) > 0 {
|
||||
i, err := strconv.ParseInt(ss[0], 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
d = time.Duration(i) * time.Second
|
||||
hasDigits = true
|
||||
}
|
||||
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||
if len(ss[1]) > 9 {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
f, err := strconv.ParseInt(ss[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||
}
|
||||
for i := 9; i > len(ss[1]); i-- {
|
||||
f *= 10
|
||||
}
|
||||
d += time.Duration(f)
|
||||
hasDigits = true
|
||||
}
|
||||
if !hasDigits {
|
||||
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||
}
|
||||
|
||||
return &d, nil
|
||||
}
|
||||
|
||||
type jsonName struct {
|
||||
Service string
|
||||
Method string
|
||||
|
@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) {
|
|||
type jsonMC struct {
|
||||
Name *[]jsonName
|
||||
WaitForReady *bool
|
||||
Timeout *string
|
||||
Timeout *internalserviceconfig.Duration
|
||||
MaxRequestMessageBytes *int64
|
||||
MaxResponseMessageBytes *int64
|
||||
RetryPolicy *jsonRetryPolicy
|
||||
|
@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
|||
if m.Name == nil {
|
||||
continue
|
||||
}
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
WaitForReady: m.WaitForReady,
|
||||
Timeout: d,
|
||||
Timeout: (*time.Duration)(m.Timeout),
|
||||
}
|
||||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
|
@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
|||
if jrp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
ib, err := parseDuration(&jrp.InitialBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mb, err := parseDuration(&jrp.MaxBackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if jrp.MaxAttempts <= 1 ||
|
||||
*ib <= 0 ||
|
||||
*mb <= 0 ||
|
||||
jrp.InitialBackoff <= 0 ||
|
||||
jrp.MaxBackoff <= 0 ||
|
||||
jrp.BackoffMultiplier <= 0 ||
|
||||
len(jrp.RetryableStatusCodes) == 0 {
|
||||
logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
|
||||
|
@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
|
|||
|
||||
rp := &internalserviceconfig.RetryPolicy{
|
||||
MaxAttempts: jrp.MaxAttempts,
|
||||
InitialBackoff: *ib,
|
||||
MaxBackoff: *mb,
|
||||
InitialBackoff: time.Duration(jrp.InitialBackoff),
|
||||
MaxBackoff: time.Duration(jrp.MaxBackoff),
|
||||
BackoffMultiplier: jrp.BackoffMultiplier,
|
||||
RetryableStatusCodes: make(map[codes.Code]bool),
|
||||
}
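The two Duration fields now unmarshal proto-style duration strings directly, which is what made hand-rolled parseDuration removable. For orientation, a method config that exercises them (shape per the gRPC service-config spec; the service name is illustrative):

const sampleServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "timeout": "1.5s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

// Typically supplied via grpc.WithDefaultServiceConfig(sampleServiceConfig) or
// returned by a resolver; parseServiceConfig above converts "1.5s" and "0.1s"
// into time.Duration through internalserviceconfig.Duration.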
|
||||
|
|
22 changes: vendor/google.golang.org/grpc/stats/stats.go (generated, vendored)
|
@ -67,10 +67,18 @@ type InPayload struct {
|
|||
Payload interface{}
|
||||
// Data is the serialized message payload.
|
||||
Data []byte
|
||||
// Length is the length of uncompressed data.
|
||||
|
||||
// Length is the size of the uncompressed payload data. Does not include any
|
||||
// framing (gRPC or HTTP/2).
|
||||
Length int
|
||||
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||
// CompressedLength is the size of the compressed payload data. Does not
|
||||
// include any framing (gRPC or HTTP/2). Same as Length if compression not
|
||||
// enabled.
|
||||
CompressedLength int
|
||||
// WireLength is the size of the compressed payload data plus gRPC framing.
|
||||
// Does not include HTTP/2 framing.
|
||||
WireLength int
|
||||
|
||||
// RecvTime is the time when the payload is received.
|
||||
RecvTime time.Time
|
||||
}
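The relationship between the three size fields mirrors how rpc_util.go fills them in; a small helper makes it explicit (headerLen is the 5-byte gRPC message-frame prefix):

func payloadSizes(uncompressed, compressed []byte) (length, compressedLength, wireLength int) {
	const headerLen = 5                       // 1-byte compressed flag + 4-byte length prefix
	length = len(uncompressed)                // Length
	compressedLength = len(compressed)        // CompressedLength; equals Length without compression
	wireLength = compressedLength + headerLen // WireLength; excludes HTTP/2 framing
	return length, compressedLength, wireLength
}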
|
||||
|
@ -129,9 +137,15 @@ type OutPayload struct {
|
|||
Payload interface{}
|
||||
// Data is the serialized message payload.
|
||||
Data []byte
|
||||
// Length is the length of uncompressed data.
|
||||
// Length is the size of the uncompressed payload data. Does not include any
|
||||
// framing (gRPC or HTTP/2).
|
||||
Length int
|
||||
// WireLength is the length of data on wire (compressed, signed, encrypted).
|
||||
// CompressedLength is the size of the compressed payload data. Does not
|
||||
// include any framing (gRPC or HTTP/2). Same as Length if compression not
|
||||
// enabled.
|
||||
CompressedLength int
|
||||
// WireLength is the size of the compressed payload data plus gRPC framing.
|
||||
// Does not include HTTP/2 framing.
|
||||
WireLength int
|
||||
// SentTime is the time when the payload is sent.
|
||||
SentTime time.Time
|
||||
|
|
53 changes: vendor/google.golang.org/grpc/status/status.go (generated, vendored)
|
@ -77,9 +77,18 @@ func FromProto(s *spb.Status) *Status {
|
|||
// FromError returns a Status representation of err.
|
||||
//
|
||||
// - If err was produced by this package or implements the method `GRPCStatus()
|
||||
// *Status`, the appropriate Status is returned.
|
||||
// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
|
||||
// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
|
||||
// errors, the message returned contains the entire err.Error() text and not
|
||||
// just the wrapped status. In that case, ok is true.
|
||||
//
|
||||
// - If err is nil, a Status is returned with codes.OK and no message.
|
||||
// - If err is nil, a Status is returned with codes.OK and no message, and ok
|
||||
// is true.
|
||||
//
|
||||
// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
|
||||
// returns nil (which maps to Codes.OK), or if err wraps a type
|
||||
// satisfying this, a Status is returned with codes.Unknown and err's
|
||||
// Error() message, and ok is false.
|
||||
//
|
||||
// - Otherwise, err is an error not compatible with this package. In this
|
||||
// case, a Status is returned with codes.Unknown and err's Error() message,
|
||||
|
@ -88,10 +97,29 @@ func FromError(err error) (s *Status, ok bool) {
|
|||
if err == nil {
|
||||
return nil, true
|
||||
}
|
||||
if se, ok := err.(interface {
|
||||
GRPCStatus() *Status
|
||||
}); ok {
|
||||
return se.GRPCStatus(), true
|
||||
type grpcstatus interface{ GRPCStatus() *Status }
|
||||
if gs, ok := err.(grpcstatus); ok {
|
||||
if gs.GRPCStatus() == nil {
|
||||
// Error has status nil, which maps to codes.OK. There
|
||||
// is no sensible behavior for this, so we turn it into
|
||||
// an error with codes.Unknown and discard the existing
|
||||
// status.
|
||||
return New(codes.Unknown, err.Error()), false
|
||||
}
|
||||
return gs.GRPCStatus(), true
|
||||
}
|
||||
var gs grpcstatus
|
||||
if errors.As(err, &gs) {
|
||||
if gs.GRPCStatus() == nil {
|
||||
// Error wraps an error that has status nil, which maps
|
||||
// to codes.OK. There is no sensible behavior for this,
|
||||
// so we turn it into an error with codes.Unknown and
|
||||
// discard the existing status.
|
||||
return New(codes.Unknown, err.Error()), false
|
||||
}
|
||||
p := gs.GRPCStatus().Proto()
|
||||
p.Message = err.Error()
|
||||
return status.FromProto(p), true
|
||||
}
|
||||
return New(codes.Unknown, err.Error()), false
|
||||
}
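A worked example of the new wrapped-error handling, assuming the usual fmt, codes, and status imports:

base := status.Error(codes.NotFound, "user not found")
wrapped := fmt.Errorf("lookup failed: %w", base)

s, ok := status.FromError(wrapped)
// ok == true and s.Code() == codes.NotFound; s.Message() carries the full wrapped
// text, e.g. "lookup failed: rpc error: code = NotFound desc = user not found".
fmt.Println(ok, s.Code(), s.Message())

// status.Code now also sees through the wrapping:
fmt.Println(status.Code(wrapped)) // NotFound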
|
||||
|
@ -103,19 +131,16 @@ func Convert(err error) *Status {
|
|||
return s
|
||||
}
|
||||
|
||||
// Code returns the Code of the error if it is a Status error, codes.OK if err
|
||||
// is nil, or codes.Unknown otherwise.
|
||||
// Code returns the Code of the error if it is a Status error or if it wraps a
|
||||
// Status error. If that is not the case, it returns codes.OK if err is nil, or
|
||||
// codes.Unknown otherwise.
|
||||
func Code(err error) codes.Code {
|
||||
// Don't use FromError to avoid allocation of OK status.
|
||||
if err == nil {
|
||||
return codes.OK
|
||||
}
|
||||
if se, ok := err.(interface {
|
||||
GRPCStatus() *Status
|
||||
}); ok {
|
||||
return se.GRPCStatus().Code()
|
||||
}
|
||||
return codes.Unknown
|
||||
|
||||
return Convert(err).Code()
|
||||
}
|
||||
|
||||
// FromContextError converts a context error or wrapped context error into a
|
||||
|
|
82 changes: vendor/google.golang.org/grpc/stream.go (generated, vendored)
|
@ -123,6 +123,9 @@ type ClientStream interface {
|
|||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines. It is also
|
||||
// not safe to call CloseSend concurrently with SendMsg.
|
||||
//
|
||||
// It is not safe to modify the message after calling SendMsg. Tracing
|
||||
// libraries and stats handlers may use the message lazily.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the stream completes successfully. On
|
||||
|
@ -152,6 +155,11 @@ type ClientStream interface {
|
|||
// If none of the above happen, a goroutine and a context will be leaked, and grpc
|
||||
// will not call the optionally-configured stats handler with a stats.End message.
|
||||
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||
if err := cc.idlenessMgr.onCallBegin(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer cc.idlenessMgr.onCallEnd()
|
||||
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
|
@ -168,10 +176,19 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||
}
|
||||
|
||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||
if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
|
||||
// validate md
|
||||
if err := imetadata.Validate(md); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
// validate added
|
||||
for _, kvs := range added {
|
||||
for i := 0; i < len(kvs); i += 2 {
|
||||
if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
cc.incrCallsStarted()
|
||||
|
@ -352,7 +369,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
|
|||
}
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(logEntry)
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -460,7 +477,7 @@ func (a *csAttempt) newStream() error {
|
|||
// It is safe to overwrite the csAttempt's context here, since all state
|
||||
// maintained in it are local to the attempt. When the attempt has to be
|
||||
// retried, a new instance of csAttempt will be created.
|
||||
if a.pickResult.Metatada != nil {
|
||||
if a.pickResult.Metadata != nil {
|
||||
// We currently do not have a function in the metadata package which
|
||||
// merges given metadata with existing metadata in a context. Existing
|
||||
// function `AppendToOutgoingContext()` takes a variadic argument of key
|
||||
|
@ -470,7 +487,7 @@ func (a *csAttempt) newStream() error {
|
|||
// in a form passable to AppendToOutgoingContext(), or create a version
|
||||
// of AppendToOutgoingContext() that accepts a metadata.MD.
|
||||
md, _ := metadata.FromOutgoingContext(a.ctx)
|
||||
md = metadata.Join(md, a.pickResult.Metatada)
|
||||
md = metadata.Join(md, a.pickResult.Metadata)
|
||||
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
|
||||
}
|
||||
|
||||
|
@ -800,7 +817,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
|
|||
}
|
||||
cs.serverHeaderBinlogged = true
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(logEntry)
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
|
@ -881,7 +898,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||
Message: data,
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(cm)
|
||||
binlog.Log(cs.ctx, cm)
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
@ -905,7 +922,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
|
|||
Message: recvInfo.uncompressedBytes,
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(sm)
|
||||
binlog.Log(cs.ctx, sm)
|
||||
}
|
||||
}
|
||||
if err != nil || !cs.desc.ServerStreams {
|
||||
|
@ -926,7 +943,7 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
|
|||
logEntry.PeerAddr = peer.Addr
|
||||
}
|
||||
for _, binlog := range cs.binlogs {
|
||||
binlog.Log(logEntry)
|
||||
binlog.Log(cs.ctx, logEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -953,7 +970,7 @@ func (cs *clientStream) CloseSend() error {
 			OnClientSide: true,
 		}
 		for _, binlog := range cs.binlogs {
-			binlog.Log(chc)
+			binlog.Log(cs.ctx, chc)
 		}
 	}
 	// We never returned an error here for reasons.
@@ -971,6 +988,9 @@ func (cs *clientStream) finish(err error) {
 		return
 	}
 	cs.finished = true
+	for _, onFinish := range cs.callInfo.onFinish {
+		onFinish(err)
+	}
 	cs.commitAttemptLocked()
 	if cs.attempt != nil {
 		cs.attempt.finish(err)
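The onFinish hooks invoked here are populated from a per-call option. Assuming this gRPC version exposes the experimental grpc.OnFinish call option as the source of cs.callInfo.onFinish (treat the name and signature as an assumption), a caller-side sketch would look like this; the descriptor and method name are placeholders.

package onfinish

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// watchWithCallback opens a placeholder server-streaming call and logs the
// RPC's final error (nil on success) once clientStream.finish runs the hooks.
func watchWithCallback(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) {
	desc := &grpc.StreamDesc{ServerStreams: true} // placeholder descriptor
	return cc.NewStream(ctx, desc, "/example.Service/Watch",
		grpc.OnFinish(func(err error) { // assumed experimental call option
			log.Printf("RPC finished, err=%v", err)
		}),
	)
}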
@@ -992,7 +1012,7 @@ func (cs *clientStream) finish(err error) {
 			OnClientSide: true,
 		}
 		for _, binlog := range cs.binlogs {
-			binlog.Log(c)
+			binlog.Log(cs.ctx, c)
 		}
 	}
 	if err == nil {
@@ -1082,7 +1102,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
 			Payload: m,
 			// TODO truncate large payload.
 			Data: payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength + headerLen,
+			WireLength: payInfo.compressedLength + headerLen,
+			CompressedLength: payInfo.compressedLength,
 			Length: len(payInfo.uncompressedBytes),
 		})
 	}
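As this hunk shows, received-payload stats now compute WireLength from the compressed size plus the framing header and report CompressedLength as its own field. A minimal stats.Handler sketch that reads those fields follows; the handler type and its registration are illustrative and not part of the diff.

package payloadstats

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

type payloadLogger struct{}

func (payloadLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (payloadLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (payloadLogger) HandleConn(context.Context, stats.ConnStats)                       {}

// HandleRPC logs the sizes recorded for each received message: uncompressed
// length, compressed length, and the on-the-wire length (compressed bytes
// plus the 5-byte message-framing header, per the change above).
func (payloadLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InPayload); ok {
		log.Printf("recv: length=%d compressed=%d wire=%d",
			in.Length, in.CompressedLength, in.WireLength)
	}
}

Such a handler could be attached with grpc.WithStatsHandler(payloadLogger{}) when dialing, or grpc.StatsHandler(payloadLogger{}) when constructing a server.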
@@ -1252,14 +1273,19 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 	as.p = &parser{r: s}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
-		// Listen on cc and stream contexts to cleanup when the user closes the
-		// ClientConn or cancels the stream context. In all other cases, an error
-		// should already be injected into the recv buffer by the transport, which
-		// the client will eventually receive, and then we will cancel the stream's
-		// context in clientStream.finish.
+		// Listen on stream context to cleanup when the stream context is
+		// canceled. Also listen for the addrConn's context in case the
+		// addrConn is closed or reconnects to a different address. In all
+		// other cases, an error should already be injected into the recv
+		// buffer by the transport, which the client will eventually receive,
+		// and then we will cancel the stream's context in
+		// addrConnStream.finish.
 		go func() {
+			ac.mu.Lock()
+			acCtx := ac.ctx
+			ac.mu.Unlock()
 			select {
-			case <-ac.ctx.Done():
+			case <-acCtx.Done():
 				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
 			case <-ctx.Done():
 				as.finish(toRPCErr(ctx.Err()))
@@ -1511,6 +1537,8 @@ type serverStream struct {
 	comp   encoding.Compressor
 	decomp encoding.Compressor
 
+	sendCompressorName string
+
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo
@@ -1558,7 +1586,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
 		}
 		ss.serverHeaderBinlogged = true
 		for _, binlog := range ss.binlogs {
-			binlog.Log(sh)
+			binlog.Log(ss.ctx, sh)
 		}
 	}
 	return err
@@ -1603,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 		}
 	}()
 
+	// Server handler could have set new compressor by calling SetSendCompressor.
+	// In case it is set, we need to use it for compressing outbound message.
+	if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+		ss.comp = encoding.GetCompressor(sendCompressorsName)
+		ss.sendCompressorName = sendCompressorsName
+	}
+
 	// load hdr, payload, data
 	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
 	if err != nil {
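The branch added above lets SendMsg pick up a compressor that the handler switched mid-call via SetSendCompressor, which the comment names. A hedged handler-side sketch is below; the stream type, message slice, and the exact SetSendCompressor signature are assumptions for illustration, not taken from this diff.

package sendcompress

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// sendCompressed switches the send compressor to gzip before streaming
// responses; per the hunk above, SendMsg re-reads the negotiated compressor
// name and uses the new one for subsequent messages.
func sendCompressed(stream grpc.ServerStream, msgs []interface{}) error {
	if err := grpc.SetSendCompressor(stream.Context(), "gzip"); err != nil { // assumed signature
		return err
	}
	for _, m := range msgs {
		if err := stream.SendMsg(m); err != nil {
			return err
		}
	}
	return nil
}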
@@ -1624,14 +1659,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
 			}
 			ss.serverHeaderBinlogged = true
 			for _, binlog := range ss.binlogs {
-				binlog.Log(sh)
+				binlog.Log(ss.ctx, sh)
 			}
 		}
 		sm := &binarylog.ServerMessage{
 			Message: data,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(sm)
+			binlog.Log(ss.ctx, sm)
 		}
 	}
 	if len(ss.statsHandler) != 0 {
@@ -1679,7 +1714,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 			if len(ss.binlogs) != 0 {
 				chc := &binarylog.ClientHalfClose{}
 				for _, binlog := range ss.binlogs {
-					binlog.Log(chc)
+					binlog.Log(ss.ctx, chc)
 				}
 			}
 			return err
@@ -1696,8 +1731,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 				Payload: m,
 				// TODO truncate large payload.
 				Data: payInfo.uncompressedBytes,
-				WireLength: payInfo.wireLength + headerLen,
 				Length: len(payInfo.uncompressedBytes),
+				WireLength: payInfo.compressedLength + headerLen,
+				CompressedLength: payInfo.compressedLength,
 			})
 		}
 	}
@@ -1706,7 +1742,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
 			Message: payInfo.uncompressedBytes,
 		}
 		for _, binlog := range ss.binlogs {
-			binlog.Log(cm)
+			binlog.Log(ss.ctx, cm)
 		}
 	}
 	return nil
2  vendor/google.golang.org/grpc/version.go  generated  vendored
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.53.0"
+const Version = "1.56.3"
15  vendor/google.golang.org/grpc/vet.sh  generated  vendored
@@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then
       github.com/client9/misspell/cmd/misspell
     popd
   if [[ -z "${VET_SKIP_PROTO}" ]]; then
-    if [[ "${TRAVIS}" = "true" ]]; then
-      PROTOBUF_VERSION=3.14.0
-      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
-      pushd /home/travis
-      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
-      unzip ${PROTOC_FILENAME}
-      bin/protoc --version
-      popd
-    elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then
-      PROTOBUF_VERSION=3.14.0
+    if [[ "${GITHUB_ACTIONS}" = "true" ]]; then
+      PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files.
       PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
       pushd /home/runner/go
       wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
@@ -68,8 +60,7 @@ fi
 
 # - Check that generated proto files are up to date.
 if [[ -z "${VET_SKIP_PROTO}" ]]; then
-  PATH="/home/travis/bin:${PATH}" make proto && \
-    git status --porcelain 2>&1 | fail_on_output || \
+  make proto && git status --porcelain 2>&1 | fail_on_output || \
   (git status; git --no-pager diff; exit 1)
 fi
 
4  vendor/google.golang.org/protobuf/internal/version/version.go  generated  vendored
@@ -51,8 +51,8 @@ import (
 // 10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 29
-	Patch      = 1
+	Minor      = 30
+	Patch      = 0
 	PreRelease = ""
 )
 
14  vendor/modules.txt  vendored
@@ -4,13 +4,13 @@ cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.18.0
+# cloud.google.com/go/compute v1.19.1
 ## explicit; go 1.19
 cloud.google.com/go/compute/internal
 # cloud.google.com/go/compute/metadata v0.2.3
 ## explicit; go 1.19
 cloud.google.com/go/compute/metadata
-# cloud.google.com/go/iam v0.12.0
+# cloud.google.com/go/iam v0.13.0
 ## explicit; go 1.19
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
@@ -194,7 +194,7 @@ github.com/golang-jwt/jwt/v4
 # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e
 ## explicit
 github.com/golang/groupcache/lru
-# github.com/golang/protobuf v1.5.2
+# github.com/golang/protobuf v1.5.3
 ## explicit; go 1.9
 github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
@@ -378,7 +378,7 @@ golang.org/x/net/http2/hpack
 golang.org/x/net/idna
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.6.0
+# golang.org/x/oauth2 v0.7.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
@@ -433,7 +433,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230320184635-7606e756e683
+# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
@@ -442,7 +442,7 @@ google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/grpc v1.53.0
+# google.golang.org/grpc v1.56.3
 ## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -504,7 +504,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.29.1
+# google.golang.org/protobuf v1.30.0
 ## explicit; go 1.11
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext