forked from TrueCloudLab/restic

Merge pull request #2478 from restic/update-ci

Update Go version for Appveyor/Travis

commit aa5af8af0e

128 changed files with 1083 additions and 36981 deletions
.travis.yml  (12 changes)
@@ -19,9 +19,17 @@ matrix:
           - $HOME/.cache/go-build
           - $HOME/gopath/pkg/mod

-    # only run fuse and cloud backends tests on Travis for the latest Go on Linux
     - os: linux
       go: "1.12.x"
+      env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
+      cache:
+        directories:
+          - $HOME/.cache/go-build
+          - $HOME/gopath/pkg/mod
+
+    # only run fuse and cloud backends tests on Travis for the latest Go on Linux
+    - os: linux
+      go: "1.13.x"
       sudo: true
       cache:
         directories:

@@ -29,7 +37,7 @@ matrix:
           - $HOME/gopath/pkg/mod

     - os: osx
-      go: "1.12.x"
+      go: "1.13.x"
       env: RESTIC_TEST_FUSE=0 RESTIC_TEST_CLOUD_BACKENDS=0
       cache:
         directories:
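The fuse and cloud-backend tests are kept only on the newest Go build; the other matrix entries get RESTIC_TEST_FUSE=0 and RESTIC_TEST_CLOUD_BACKENDS=0. A minimal sketch of how a test helper can honour such a switch, where the variable name comes from the diff above and the helper function and skip behaviour are assumptions made only for illustration:

```go
package main

import (
    "fmt"
    "os"
)

// runFuseTests reports whether fuse integration tests should run.
// RESTIC_TEST_FUSE is the variable set in .travis.yml above; treating an
// explicit "0" as "skip" is an assumption for this sketch.
func runFuseTests() bool {
    return os.Getenv("RESTIC_TEST_FUSE") != "0"
}

func main() {
    if !runFuseTests() {
        fmt.Println("skipping fuse tests (RESTIC_TEST_FUSE=0)")
        return
    }
    fmt.Println("running fuse tests")
}
```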
@@ -20,8 +20,8 @@ init:

 install:
   - rmdir c:\go /s /q
-  - appveyor DownloadFile https://dl.google.com/go/go1.12.1.windows-amd64.msi
-  - msiexec /i go1.12.1.windows-amd64.msi /q
+  - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-amd64.msi
+  - msiexec /i go1.13.4.windows-amd64.msi /q
   - go version
   - go env
   - appveyor DownloadFile http://sourceforge.netcologne.de/project/gnuwin32/tar/1.13-1/tar-1.13-1-bin.zip -FileName tar.zip
go.mod  (10 changes)
@@ -3,21 +3,21 @@ module github.com/restic/restic
 require (
     bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
     cloud.google.com/go v0.37.4 // indirect
-    contrib.go.opencensus.io/exporter/ocagent v0.4.12 // indirect
     github.com/Azure/azure-sdk-for-go v27.3.0+incompatible
-    github.com/Azure/go-autorest v12.0.0+incompatible // indirect
+    github.com/Azure/go-autorest/autorest v0.9.2 // indirect
     github.com/cenkalti/backoff v2.1.1+incompatible
     github.com/cpuguy83/go-md2man v1.0.10 // indirect
-    github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
     github.com/dnaeon/go-vcr v1.0.1 // indirect
     github.com/elithrar/simple-scrypt v1.3.0
     github.com/go-ini/ini v1.42.0 // indirect
+    github.com/golang/protobuf v1.3.1 // indirect
     github.com/google/go-cmp v0.2.0
     github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e // indirect
     github.com/hashicorp/golang-lru v0.5.1 // indirect
     github.com/inconshreveable/mousetrap v1.0.0 // indirect
     github.com/juju/ratelimit v1.0.1
     github.com/kr/fs v0.1.0 // indirect
+    github.com/kr/pretty v0.1.0 // indirect
     github.com/kurin/blazer v0.5.3
     github.com/marstr/guid v1.1.0 // indirect
     github.com/mattn/go-isatty v0.0.7

@@ -35,6 +35,7 @@ require (
     github.com/spf13/cobra v0.0.3
     github.com/spf13/pflag v1.0.3
     github.com/stretchr/testify v1.3.0 // indirect
+    go.opencensus.io v0.20.2 // indirect
     golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
     golang.org/x/net v0.0.0-20190424024845-afe8014c977f
     golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a

@@ -45,7 +46,10 @@ require (
     google.golang.org/appengine v1.5.0 // indirect
     google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 // indirect
     google.golang.org/grpc v1.20.1 // indirect
+    gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
     gopkg.in/ini.v1 v1.42.0 // indirect
     gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637
     gopkg.in/yaml.v2 v2.2.2 // indirect
 )
+
+go 1.13
go.sum  (28 changes)
@@ -4,12 +4,21 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
 github.com/Azure/azure-sdk-for-go v27.3.0+incompatible h1:i+ROfG3CsZUPoVAnhK06T3R6PmBzKB9ds+lHBpN7Mzo=
 github.com/Azure/azure-sdk-for-go v27.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
-github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
+github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=

@@ -19,8 +28,6 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
 github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=

@@ -37,7 +44,6 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP
 github.com/elithrar/simple-scrypt v1.3.0 h1:KIlOlxdoQf9JWKl5lMAJ28SY2URB0XTRDn2TckyzAZg=
 github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1qPwrl/EJwEqnZo=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-ini/ini v1.42.0 h1:TWr1wGj35+UiWHlBA8er89seFXxzwFn11spilrrj+38=
 github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=

@@ -65,8 +71,6 @@ github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e h1:XWcjeEtTFTOVA
 github.com/gopherjs/gopherjs v0.0.0-20190411002643-bd77b112433e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=

@@ -129,7 +133,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU=
 github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=

@@ -167,7 +170,6 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -192,7 +194,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -227,7 +228,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCP
 google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=

@@ -238,11 +238,9 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
 gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=
 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
@@ -28,8 +28,9 @@ var ForbiddenImports = map[string]bool{

 // Use a specific version of gofmt (the latest stable, usually) to guarantee
 // deterministic formatting. This is used with the GoVersion.AtLeast()
-// function (so that we don't forget to update it).
-var GofmtVersion = ParseGoVersion("go1.11")
+// function (so that we don't forget to update it). This is also used to run
+// `go mod vendor` and `go mod tidy`.
+var GofmtVersion = ParseGoVersion("go1.13")

 // GoVersion is the version of Go used to compile the project.
 type GoVersion struct {
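For context, a minimal sketch of the version gate the comment refers to, assuming a GoVersion with Major and Minor fields; only ParseGoVersion, AtLeast, and the "go1.13" value appear in the diff, the rest is illustrative:

```go
package main

import "fmt"

// GoVersion mirrors the type changed above; the field names are assumptions.
type GoVersion struct{ Major, Minor int }

// AtLeast reports whether v is at least other (the check the comment mentions).
func (v GoVersion) AtLeast(other GoVersion) bool {
    if v.Major != other.Major {
        return v.Major > other.Major
    }
    return v.Minor >= other.Minor
}

// ParseGoVersion parses strings like "go1.13"; error handling is elided.
func ParseGoVersion(s string) GoVersion {
    var v GoVersion
    fmt.Sscanf(s, "go%d.%d", &v.Major, &v.Minor)
    return v
}

var gofmtVersion = ParseGoVersion("go1.13")

func main() {
    running := ParseGoVersion("go1.13")
    if running.AtLeast(gofmtVersion) {
        // A new enough toolchain lets the CI script run gofmt,
        // `go mod vendor`, and `go mod tidy` deterministically.
        fmt.Println("toolchain accepted")
    }
}
```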
@@ -200,6 +201,11 @@ func (env *TravisEnvironment) Prepare() error {
         }
     }

+    // reset changes made to go.mod/go.sum by "go get"
+    if err := run("git", "checkout", "go.mod", "go.sum"); err != nil {
+        return err
+    }
+
     if err := env.getMinio(); err != nil {
         return err
     }
@@ -209,6 +215,12 @@ func (env *TravisEnvironment) Prepare() error {
     if err := run("go", "get", "github.com/mitchellh/gox"); err != nil {
         return err
     }
+
+    // reset changes made to go.mod/go.sum by "go get"
+    if err := run("git", "checkout", "go.mod", "go.sum"); err != nil {
+        return err
+    }
+
     if runtime.GOOS == "linux" {
         env.goxOSArch = []string{
             "linux/386", "linux/amd64",
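The hunks above call a run helper that is not part of this diff; a sketch of the shape such a helper usually has (the real restic implementation may differ):

```go
package main

import (
    "os"
    "os/exec"
)

// run is an assumed stand-in for the helper used above: execute a command,
// inherit stdout/stderr, and return its error.
func run(command string, args ...string) error {
    cmd := exec.Command(command, args...)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}

func main() {
    // The same call the new CI code makes to undo go.mod/go.sum edits
    // left behind by `go get`.
    if err := run("git", "checkout", "go.mod", "go.sum"); err != nil {
        os.Exit(1)
    }
}
```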
@@ -431,6 +443,10 @@ func (env *AppveyorEnvironment) Teardown() error {
 // findGoFiles returns a list of go source code file names below dir.
 func findGoFiles(dir string) (list []string, err error) {
     err = filepath.Walk(dir, func(name string, fi os.FileInfo, err error) error {
+        if err != nil {
+            return err
+        }
+
         relpath, err := filepath.Rel(dir, name)
         if err != nil {
             return err
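The added guard follows the filepath.Walk contract: the callback receives any error hit while reading an entry and should return it to stop the walk before touching the invalid path. A standalone sketch of the same pattern (directory and filter are illustrative):

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"
)

func main() {
    // Propagating the incoming err, as the hunk above now does, aborts the
    // walk instead of calling filepath.Rel on an entry that failed to stat.
    err := filepath.Walk(".", func(name string, fi os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        relpath, err := filepath.Rel(".", name)
        if err != nil {
            return err
        }
        if filepath.Ext(relpath) == ".go" {
            fmt.Println(relpath)
        }
        return nil
    })
    if err != nil {
        fmt.Fprintln(os.Stderr, "walk failed:", err)
    }
}
```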
@@ -476,9 +492,9 @@ func updateEnv(env []string, override map[string]string) []string {

 func (env *TravisEnvironment) findImports() (map[string][]string, error) {
     res := make(map[string][]string)
+    msg("checking for forbidden imports")

     cmd := exec.Command("go", "list", "-f", `{{.ImportPath}} {{join .Imports " "}}`, "./internal/...", "./cmd/...")
-    cmd.Env = updateEnv(os.Environ(), env.env)
     cmd.Stderr = os.Stderr

     output, err := cmd.Output()
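findImports runs go list with the template {{.ImportPath}} {{join .Imports " "}} and collects the result into a map[string][]string. The parsing step is outside this hunk, so the following is only a sketch of how that output is typically split; the sample output lines are invented for the example:

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    // Invented sample of the `go list` template output used above:
    // one line per package, import path first, then its imports.
    output := "example.com/cmd/tool fmt os\n" +
        "example.com/internal/pkg context time\n"

    res := make(map[string][]string)
    for _, line := range strings.Split(strings.TrimSpace(output), "\n") {
        fields := strings.Fields(line)
        if len(fields) == 0 {
            continue
        }
        res[fields[0]] = fields[1:]
    }
    fmt.Println(res)
}
```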
vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml  (18 changes, generated, vendored)
@@ -1,18 +0,0 @@
-language: go
-
-go:
-  - 1.11.x
-
-go_import_path: contrib.go.opencensus.io/exporter/ocagent
-
-before_script:
-  - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
-  - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
-
-script:
-  - go build ./... # Ensure dependency updates don't break build
-  - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
-  - go vet ./...
-  - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled
-  - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
-  - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md  (24 changes, generated, vendored)
@@ -1,24 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
vendor/contrib.go.opencensus.io/exporter/ocagent/README.md  (61 changes, generated, vendored)
@@ -1,61 +0,0 @@
-# OpenCensus Agent Go Exporter
-
-[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
-
-
-This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
-OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from
-OpenCensus Library, export them to other backends and possibly push configurations back to
-Library. See more details on [OC-Agent Readme][OCAgentReadme].
-
-Note: This is an experimental repository and is likely to get backwards-incompatible changes.
-Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
-
-## Installation
-
-```bash
-$ go get -u contrib.go.opencensus.io/exporter/ocagent
-```
-
-## Usage
-
-```go
-import (
-    "context"
-    "fmt"
-    "log"
-    "time"
-
-    "contrib.go.opencensus.io/exporter/ocagent"
-    "go.opencensus.io/trace"
-)
-
-func Example() {
-    exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
-    if err != nil {
-        log.Fatalf("Failed to create the agent exporter: %v", err)
-    }
-    defer exp.Stop()
-
-    // Now register it as a trace exporter.
-    trace.RegisterExporter(exp)
-
-    // Then use the OpenCensus tracing library, like we normally would.
-    ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
-    defer span.End()
-
-    for i := 0; i < 10; i++ {
-        _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
-        <-time.After(6 * time.Millisecond)
-        iSpan.End()
-    }
-}
-```
-
-[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
-[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
-[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
-[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
-[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
-[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
-
vendor/contrib.go.opencensus.io/exporter/ocagent/common.go  (38 changes, generated, vendored)
@@ -1,38 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
-    "math/rand"
-    "time"
-)
-
-var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
-
-// retries function fn upto n times, if fn returns an error lest it returns nil early.
-// It applies exponential backoff in units of (1<<n) + jitter microsends.
-func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
-    for i := int64(0); i < nTries; i++ {
-        err = fn()
-        if err == nil {
-            return nil
-        }
-        // Backoff for a time period with a pseudo-random jitter
-        jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
-        ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
-        <-time.After(ts)
-    }
-    return err
-}
vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go  (97 changes, generated, vendored)
@@ -1,97 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
-    "math/rand"
-    "sync/atomic"
-    "time"
-)
-
-const (
-    sDisconnected int32 = 5 + iota
-    sConnected
-)
-
-func (ae *Exporter) setStateDisconnected() {
-    atomic.StoreInt32(&ae.connectionState, sDisconnected)
-    select {
-    case ae.disconnectedCh <- true:
-    default:
-    }
-}
-
-func (ae *Exporter) setStateConnected() {
-    atomic.StoreInt32(&ae.connectionState, sConnected)
-}
-
-func (ae *Exporter) connected() bool {
-    return atomic.LoadInt32(&ae.connectionState) == sConnected
-}
-
-const defaultConnReattemptPeriod = 10 * time.Second
-
-func (ae *Exporter) indefiniteBackgroundConnection() error {
-    defer func() {
-        ae.backgroundConnectionDoneCh <- true
-    }()
-
-    connReattemptPeriod := ae.reconnectionPeriod
-    if connReattemptPeriod <= 0 {
-        connReattemptPeriod = defaultConnReattemptPeriod
-    }
-
-    // No strong seeding required, nano time can
-    // already help with pseudo uniqueness.
-    rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
-
-    // maxJitter: 1 + (70% of the connectionReattemptPeriod)
-    maxJitter := int64(1 + 0.7*float64(connReattemptPeriod))
-
-    for {
-        // Otherwise these will be the normal scenarios to enable
-        // reconnections if we trip out.
-        // 1. If we've stopped, return entirely
-        // 2. Otherwise block until we are disconnected, and
-        // then retry connecting
-        select {
-        case <-ae.stopCh:
-            return errStopped
-
-        case <-ae.disconnectedCh:
-            // Normal scenario that we'll wait for
-        }
-
-        if err := ae.connect(); err == nil {
-            ae.setStateConnected()
-        } else {
-            ae.setStateDisconnected()
-        }
-
-        // Apply some jitter to avoid lockstep retrials of other
-        // agent-exporters. Lockstep retrials could result in an
-        // innocent DDOS, by clogging the machine's resources and network.
-        jitter := time.Duration(rng.Int63n(maxJitter))
-        <-time.After(connReattemptPeriod + jitter)
-    }
-}
-
-func (ae *Exporter) connect() error {
-    cc, err := ae.dialToAgent()
-    if err != nil {
-        return err
-    }
-    return ae.enableConnectionStreams(cc)
-}
vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod  (10 changes, generated, vendored)
@@ -1,10 +0,0 @@
-module contrib.go.opencensus.io/exporter/ocagent
-
-require (
-    github.com/census-instrumentation/opencensus-proto v0.2.0 // this is to match the version used in census-instrumentation/opencensus-service
-    github.com/golang/protobuf v1.3.1
-    github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
-    go.opencensus.io v0.20.2
-    google.golang.org/api v0.3.1
-    google.golang.org/grpc v1.19.1
-)
vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum  (130 changes, generated, vendored)
@@ -1,130 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
-github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
-github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
-google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go  (46 changes, generated, vendored)
@@ -1,46 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
-    "os"
-
-    commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
-    "go.opencensus.io"
-)
-
-// NodeWithStartTime creates a node using nodeName and derives:
-//  Hostname from the environment
-//  Pid from the current process
-//  StartTimestamp from the start time of this process
-//  Language and library information.
-func NodeWithStartTime(nodeName string) *commonpb.Node {
-    return &commonpb.Node{
-        Identifier: &commonpb.ProcessIdentifier{
-            HostName: os.Getenv("HOSTNAME"),
-            Pid: uint32(os.Getpid()),
-            StartTimestamp: timeToTimestamp(startTime),
-        },
-        LibraryInfo: &commonpb.LibraryInfo{
-            Language: commonpb.LibraryInfo_GO_LANG,
-            ExporterVersion: Version,
-            CoreLibraryVersion: opencensus.Version(),
-        },
-        ServiceInfo: &commonpb.ServiceInfo{
-            Name: nodeName,
-        },
-        Attributes: make(map[string]string),
-    }
-}
vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go  (496 changes, generated, vendored)
@@ -1,496 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-import (
-    "context"
-    "errors"
-    "fmt"
-    "sync"
-    "time"
-
-    "google.golang.org/api/support/bundler"
-    "google.golang.org/grpc"
-    "google.golang.org/grpc/credentials"
-    "google.golang.org/grpc/metadata"
-
-    "go.opencensus.io/plugin/ocgrpc"
-    "go.opencensus.io/resource"
-    "go.opencensus.io/stats/view"
-    "go.opencensus.io/trace"
-
-    commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
-    agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
-    agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
-    metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
-    resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
-    tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
-)
-
-var startupMu sync.Mutex
-var startTime time.Time
-
-func init() {
-    startupMu.Lock()
-    startTime = time.Now()
-    startupMu.Unlock()
-}
-
-var _ trace.Exporter = (*Exporter)(nil)
-var _ view.Exporter = (*Exporter)(nil)
-
-type Exporter struct {
-    connectionState int32
-
-    // mu protects the non-atomic and non-channel variables
-    mu sync.RWMutex
-    // senderMu protects the concurrent unsafe traceExporter client
-    senderMu sync.RWMutex
-    started bool
-    stopped bool
-    agentAddress string
-    serviceName string
-    canDialInsecure bool
-    traceExporter agenttracepb.TraceService_ExportClient
-    metricsExporter agentmetricspb.MetricsService_ExportClient
-    nodeInfo *commonpb.Node
-    grpcClientConn *grpc.ClientConn
-    reconnectionPeriod time.Duration
-    resource *resourcepb.Resource
-    compressor string
-    headers map[string]string
-
-    startOnce sync.Once
-    stopCh chan bool
-    disconnectedCh chan bool
-
-    backgroundConnectionDoneCh chan bool
-
-    traceBundler *bundler.Bundler
-
-    // viewDataBundler is the bundler to enable conversion
-    // from OpenCensus-Go view.Data to metricspb.Metric.
-    // Please do not confuse it with metricsBundler!
-    viewDataBundler *bundler.Bundler
-
-    clientTransportCredentials credentials.TransportCredentials
-}
-
-func NewExporter(opts ...ExporterOption) (*Exporter, error) {
-    exp, err := NewUnstartedExporter(opts...)
-    if err != nil {
-        return nil, err
-    }
-    if err := exp.Start(); err != nil {
-        return nil, err
-    }
-    return exp, nil
-}
-
-const spanDataBufferSize = 300
-
-func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
-    e := new(Exporter)
-    for _, opt := range opts {
-        opt.withExporter(e)
-    }
-    traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
-        e.uploadTraces(bundle.([]*trace.SpanData))
-    })
-    traceBundler.DelayThreshold = 2 * time.Second
-    traceBundler.BundleCountThreshold = spanDataBufferSize
-    e.traceBundler = traceBundler
-
-    viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
-        e.uploadViewData(bundle.([]*view.Data))
-    })
-    viewDataBundler.DelayThreshold = 2 * time.Second
-    viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable.
-    e.viewDataBundler = viewDataBundler
-    e.nodeInfo = NodeWithStartTime(e.serviceName)
-    e.resource = resourceProtoFromEnv()
-
-    return e, nil
-}
-
-const (
-    maxInitialConfigRetries = 10
-    maxInitialTracesRetries = 10
-)
-
-var (
-    errAlreadyStarted = errors.New("already started")
-    errNotStarted = errors.New("not started")
-    errStopped = errors.New("stopped")
-    errNoConnection = errors.New("no active connection")
-)
-
-// Start dials to the agent, establishing a connection to it. It also
-// initiates the Config and Trace services by sending over the initial
-// messages that consist of the node identifier. Start invokes a background
-// connector that will reattempt connections to the agent periodically
-// if the connection dies.
-func (ae *Exporter) Start() error {
-    var err = errAlreadyStarted
-    ae.startOnce.Do(func() {
-        ae.mu.Lock()
-        defer ae.mu.Unlock()
-
-        ae.started = true
-        ae.disconnectedCh = make(chan bool, 1)
-        ae.stopCh = make(chan bool)
-        ae.backgroundConnectionDoneCh = make(chan bool)
-
-        ae.setStateDisconnected()
-        go ae.indefiniteBackgroundConnection()
-
-        err = nil
-    })
-
-    return err
-}
-
-func (ae *Exporter) prepareAgentAddress() string {
-    if ae.agentAddress != "" {
-        return ae.agentAddress
-    }
-    return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort)
-}
-
-func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error {
-    ae.mu.RLock()
-    started := ae.started
-    nodeInfo := ae.nodeInfo
-    ae.mu.RUnlock()
-
-    if !started {
-        return errNotStarted
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.mu.Lock()
|
|
||||||
// If the previous clientConn was non-nil, close it
|
|
||||||
if ae.grpcClientConn != nil {
|
|
||||||
_ = ae.grpcClientConn.Close()
|
|
||||||
}
|
|
||||||
ae.grpcClientConn = cc
|
|
||||||
ae.mu.Unlock()
|
|
||||||
|
|
||||||
if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
|
||||||
// Initiate the trace service by sending over node identifier info.
|
|
||||||
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
|
|
||||||
ctx := context.Background()
|
|
||||||
if len(ae.headers) > 0 {
|
|
||||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
|
||||||
}
|
|
||||||
traceExporter, err := traceSvcClient.Export(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
|
|
||||||
Node: node,
|
|
||||||
Resource: ae.resource,
|
|
||||||
}
|
|
||||||
if err := traceExporter.Send(firstTraceMessage); err != nil {
|
|
||||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.mu.Lock()
|
|
||||||
ae.traceExporter = traceExporter
|
|
||||||
ae.mu.Unlock()
|
|
||||||
|
|
||||||
// Initiate the config service by sending over node identifier info.
|
|
||||||
configStream, err := traceSvcClient.Config(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
|
|
||||||
}
|
|
||||||
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
|
|
||||||
if err := configStream.Send(firstCfgMessage); err != nil {
|
|
||||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// In the background, handle trace configurations that are beamed down
|
|
||||||
// by the agent, but also reply to it with the applied configuration.
|
|
||||||
go ae.handleConfigStreaming(configStream)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
|
||||||
metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
|
|
||||||
metricsExporter, err := metricsSvcClient.Export(context.Background())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
|
|
||||||
}
|
|
||||||
// Initiate the metrics service by sending over the first message just containing the Node and Resource.
|
|
||||||
firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
|
|
||||||
Node: node,
|
|
||||||
Resource: ae.resource,
|
|
||||||
}
|
|
||||||
if err := metricsExporter.Send(firstMetricsMessage); err != nil {
|
|
||||||
return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.mu.Lock()
|
|
||||||
ae.metricsExporter = metricsExporter
|
|
||||||
ae.mu.Unlock()
|
|
||||||
|
|
||||||
// With that we are good to go and can start sending metrics
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
|
||||||
addr := ae.prepareAgentAddress()
|
|
||||||
var dialOpts []grpc.DialOption
|
|
||||||
if ae.clientTransportCredentials != nil {
|
|
||||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
|
|
||||||
} else if ae.canDialInsecure {
|
|
||||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
|
||||||
}
|
|
||||||
if ae.compressor != "" {
|
|
||||||
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
|
|
||||||
}
|
|
||||||
dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
if len(ae.headers) > 0 {
|
|
||||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
|
||||||
}
|
|
||||||
return grpc.DialContext(ctx, addr, dialOpts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
|
|
||||||
// Note: We haven't yet implemented configuration sending so we
|
|
||||||
// should NOT be changing connection states within this function for now.
|
|
||||||
for {
|
|
||||||
recv, err := configStream.Recv()
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Check if this is a transient error or exponential backoff-able.
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cfg := recv.Config
|
|
||||||
if cfg == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise now apply the trace configuration sent down from the agent
|
|
||||||
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
|
|
||||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
|
|
||||||
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
|
|
||||||
alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
|
|
||||||
if alwaysSample {
|
|
||||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
|
||||||
} else {
|
|
||||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
|
|
||||||
}
|
|
||||||
} else { // TODO: Add the rate limiting sampler here
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then finally send back to upstream the newly applied configuration
|
|
||||||
err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stop shuts down all the connections and resources
|
|
||||||
// related to the exporter.
|
|
||||||
func (ae *Exporter) Stop() error {
|
|
||||||
ae.mu.RLock()
|
|
||||||
cc := ae.grpcClientConn
|
|
||||||
started := ae.started
|
|
||||||
stopped := ae.stopped
|
|
||||||
ae.mu.RUnlock()
|
|
||||||
|
|
||||||
if !started {
|
|
||||||
return errNotStarted
|
|
||||||
}
|
|
||||||
if stopped {
|
|
||||||
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.Flush()
|
|
||||||
|
|
||||||
// Now close the underlying gRPC connection.
|
|
||||||
var err error
|
|
||||||
if cc != nil {
|
|
||||||
err = cc.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point we can change the state variables: started and stopped
|
|
||||||
ae.mu.Lock()
|
|
||||||
ae.started = false
|
|
||||||
ae.stopped = true
|
|
||||||
ae.mu.Unlock()
|
|
||||||
close(ae.stopCh)
|
|
||||||
|
|
||||||
// Ensure that the backgroundConnector returns
|
|
||||||
<-ae.backgroundConnectionDoneCh
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
|
|
||||||
if sd == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_ = ae.traceBundler.Add(sd, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
|
|
||||||
if batch == nil || len(batch.Spans) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ae.stopCh:
|
|
||||||
return errStopped
|
|
||||||
|
|
||||||
default:
|
|
||||||
if !ae.connected() {
|
|
||||||
return errNoConnection
|
|
||||||
}
|
|
||||||
|
|
||||||
ae.senderMu.Lock()
|
|
||||||
err := ae.traceExporter.Send(batch)
|
|
||||||
ae.senderMu.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
ae.setStateDisconnected()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) ExportView(vd *view.Data) {
|
|
||||||
if vd == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_ = ae.viewDataBundler.Add(vd, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
|
|
||||||
if len(sdl) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
protoSpans := make([]*tracepb.Span, 0, len(sdl))
|
|
||||||
for _, sd := range sdl {
|
|
||||||
if sd != nil {
|
|
||||||
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return protoSpans
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
|
||||||
select {
|
|
||||||
case <-ae.stopCh:
|
|
||||||
return
|
|
||||||
|
|
||||||
default:
|
|
||||||
if !ae.connected() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
protoSpans := ocSpanDataToPbSpans(sdl)
|
|
||||||
if len(protoSpans) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ae.senderMu.Lock()
|
|
||||||
err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
|
||||||
Spans: protoSpans,
|
|
||||||
})
|
|
||||||
ae.senderMu.Unlock()
|
|
||||||
if err != nil {
|
|
||||||
ae.setStateDisconnected()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
|
|
||||||
if len(vdl) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
metrics := make([]*metricspb.Metric, 0, len(vdl))
|
|
||||||
for _, vd := range vdl {
|
|
||||||
if vd != nil {
|
|
||||||
vmetric, err := viewDataToMetric(vd)
|
|
||||||
// TODO: (@odeke-em) somehow report this error, if it is non-nil.
|
|
||||||
if err == nil && vmetric != nil {
|
|
||||||
metrics = append(metrics, vmetric)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return metrics
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) uploadViewData(vdl []*view.Data) {
|
|
||||||
select {
|
|
||||||
case <-ae.stopCh:
|
|
||||||
return
|
|
||||||
|
|
||||||
default:
|
|
||||||
if !ae.connected() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
protoMetrics := ocViewDataToPbMetrics(vdl)
|
|
||||||
if len(protoMetrics) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
|
|
||||||
Metrics: protoMetrics,
|
|
||||||
// TODO:(@odeke-em)
|
|
||||||
// a) Figure out how to derive a Node from the environment
|
|
||||||
// b) Figure out how to derive a Resource from the environment
|
|
||||||
// or better letting users of the exporter configure it.
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
ae.setStateDisconnected()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ae *Exporter) Flush() {
|
|
||||||
ae.traceBundler.Flush()
|
|
||||||
ae.viewDataBundler.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceProtoFromEnv() *resourcepb.Resource {
|
|
||||||
rs, _ := resource.FromEnv(context.Background())
|
|
||||||
if rs == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
rprs := &resourcepb.Resource{
|
|
||||||
Type: rs.Type,
|
|
||||||
}
|
|
||||||
if rs.Labels != nil {
|
|
||||||
rprs.Labels = make(map[string]string)
|
|
||||||
for k, v := range rs.Labels {
|
|
||||||
rprs.Labels[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rprs
|
|
||||||
}
|
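The exporter deleted above is created via NewExporter and registered with both the trace and view subsystems. For context, a minimal sketch of how it was typically wired up before this dependency was dropped; the agent address and service name below are placeholder assumptions, not values taken from restic:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// NewExporter dials the agent and starts the trace/metrics streams.
	exp, err := ocagent.NewExporter(
		ocagent.WithInsecure(),
		ocagent.WithAddress("localhost:55678"), // DefaultAgentHost:DefaultAgentPort
		ocagent.WithServiceName("example-service"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer exp.Stop()

	// The same value satisfies both trace.Exporter and view.Exporter.
	trace.RegisterExporter(exp)
	view.RegisterExporter(exp)
}
```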
128	vendor/contrib.go.opencensus.io/exporter/ocagent/options.go (generated, vendored)
@@ -1,128 +0,0 @@
|
||||||
// Copyright 2018, OpenCensus Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package ocagent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/credentials"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultAgentPort uint16 = 55678
|
|
||||||
DefaultAgentHost string = "localhost"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ExporterOption interface {
|
|
||||||
withExporter(e *Exporter)
|
|
||||||
}
|
|
||||||
|
|
||||||
type insecureGrpcConnection int
|
|
||||||
|
|
||||||
var _ ExporterOption = (*insecureGrpcConnection)(nil)
|
|
||||||
|
|
||||||
func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
|
|
||||||
e.canDialInsecure = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithInsecure disables client transport security for the exporter's gRPC connection
|
|
||||||
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
|
|
||||||
// does. Note, by default, client security is required unless WithInsecure is used.
|
|
||||||
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
|
|
||||||
|
|
||||||
type addressSetter string
|
|
||||||
|
|
||||||
func (as addressSetter) withExporter(e *Exporter) {
|
|
||||||
e.agentAddress = string(as)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ ExporterOption = (*addressSetter)(nil)
|
|
||||||
|
|
||||||
// WithAddress allows one to set the address that the exporter will
|
|
||||||
// connect to the agent on. If unset, it will instead try to use
|
|
||||||
// connect to DefaultAgentHost:DefaultAgentPort
|
|
||||||
func WithAddress(addr string) ExporterOption {
|
|
||||||
return addressSetter(addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
type serviceNameSetter string
|
|
||||||
|
|
||||||
func (sns serviceNameSetter) withExporter(e *Exporter) {
|
|
||||||
e.serviceName = string(sns)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ ExporterOption = (*serviceNameSetter)(nil)
|
|
||||||
|
|
||||||
// WithServiceName allows one to set/override the service name
|
|
||||||
// that the exporter will report to the agent.
|
|
||||||
func WithServiceName(serviceName string) ExporterOption {
|
|
||||||
return serviceNameSetter(serviceName)
|
|
||||||
}
|
|
||||||
|
|
||||||
type reconnectionPeriod time.Duration
|
|
||||||
|
|
||||||
func (rp reconnectionPeriod) withExporter(e *Exporter) {
|
|
||||||
e.reconnectionPeriod = time.Duration(rp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func WithReconnectionPeriod(rp time.Duration) ExporterOption {
|
|
||||||
return reconnectionPeriod(rp)
|
|
||||||
}
|
|
||||||
|
|
||||||
type compressorSetter string
|
|
||||||
|
|
||||||
func (c compressorSetter) withExporter(e *Exporter) {
|
|
||||||
e.compressor = string(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseCompressor will set the compressor for the gRPC client to use when sending requests.
|
|
||||||
// It is the responsibility of the caller to ensure that the compressor set has been registered
|
|
||||||
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
|
|
||||||
// compressors auto-register on import, such as gzip, which can be registered by calling
|
|
||||||
// `import _ "google.golang.org/grpc/encoding/gzip"`
|
|
||||||
func UseCompressor(compressorName string) ExporterOption {
|
|
||||||
return compressorSetter(compressorName)
|
|
||||||
}
|
|
||||||
|
|
||||||
type headerSetter map[string]string
|
|
||||||
|
|
||||||
func (h headerSetter) withExporter(e *Exporter) {
|
|
||||||
e.headers = map[string]string(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHeaders will send the provided headers when the gRPC stream connection
|
|
||||||
// is instantiated
|
|
||||||
func WithHeaders(headers map[string]string) ExporterOption {
|
|
||||||
return headerSetter(headers)
|
|
||||||
}
|
|
||||||
|
|
||||||
type clientCredentials struct {
|
|
||||||
credentials.TransportCredentials
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ ExporterOption = (*clientCredentials)(nil)
|
|
||||||
|
|
||||||
// WithTLSCredentials allows the connection to use TLS credentials
|
|
||||||
// when talking to the server. It takes in grpc.TransportCredentials instead
|
|
||||||
// of say a Certificate file or a tls.Certificate, because the retrieving
|
|
||||||
// these credentials can be done in many ways e.g. plain file, in code tls.Config
|
|
||||||
// or by certificate rotation, so it is up to the caller to decide what to use.
|
|
||||||
func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
|
|
||||||
return &clientCredentials{TransportCredentials: creds}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cc *clientCredentials) withExporter(e *Exporter) {
|
|
||||||
e.clientTransportCredentials = cc.TransportCredentials
|
|
||||||
}
|
|
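UseCompressor above requires the named compressor to already be registered with google.golang.org/grpc/encoding. A minimal sketch of the gzip case mentioned in its doc comment (the blank import performs the registration); this is an illustrative assumption about how a caller would combine the options, not restic code:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/ocagent"
	// gzip self-registers with google.golang.org/grpc/encoding on import,
	// which is exactly what the UseCompressor comment requires.
	_ "google.golang.org/grpc/encoding/gzip"
)

func main() {
	exp, err := ocagent.NewExporter(
		ocagent.WithInsecure(),
		ocagent.UseCompressor("gzip"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer exp.Stop()
}
```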
248	vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go (generated, vendored)
@@ -1,248 +0,0 @@
|
||||||
// Copyright 2018, OpenCensus Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package ocagent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
"go.opencensus.io/trace/tracestate"
|
|
||||||
|
|
||||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
|
||||||
"github.com/golang/protobuf/ptypes/timestamp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxAnnotationEventsPerSpan = 32
|
|
||||||
maxMessageEventsPerSpan = 128
|
|
||||||
)
|
|
||||||
|
|
||||||
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
|
||||||
if sd == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var namePtr *tracepb.TruncatableString
|
|
||||||
if sd.Name != "" {
|
|
||||||
namePtr = &tracepb.TruncatableString{Value: sd.Name}
|
|
||||||
}
|
|
||||||
return &tracepb.Span{
|
|
||||||
TraceId: sd.TraceID[:],
|
|
||||||
SpanId: sd.SpanID[:],
|
|
||||||
ParentSpanId: sd.ParentSpanID[:],
|
|
||||||
Status: ocStatusToProtoStatus(sd.Status),
|
|
||||||
StartTime: timeToTimestamp(sd.StartTime),
|
|
||||||
EndTime: timeToTimestamp(sd.EndTime),
|
|
||||||
Links: ocLinksToProtoLinks(sd.Links),
|
|
||||||
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
|
|
||||||
Name: namePtr,
|
|
||||||
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
|
|
||||||
TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
|
|
||||||
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var blankStatus trace.Status
|
|
||||||
|
|
||||||
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
|
|
||||||
if status == blankStatus {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &tracepb.Status{
|
|
||||||
Code: status.Code,
|
|
||||||
Message: status.Message,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
|
|
||||||
if len(links) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
sl := make([]*tracepb.Span_Link, 0, len(links))
|
|
||||||
for _, ocLink := range links {
|
|
||||||
// This redefinition is necessary to prevent ocLink.*ID[:] copies
|
|
||||||
// being reused -- in short we need a new ocLink per iteration.
|
|
||||||
ocLink := ocLink
|
|
||||||
|
|
||||||
sl = append(sl, &tracepb.Span_Link{
|
|
||||||
TraceId: ocLink.TraceID[:],
|
|
||||||
SpanId: ocLink.SpanID[:],
|
|
||||||
Type: ocLinkTypeToProtoLinkType(ocLink.Type),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return &tracepb.Span_Links{
|
|
||||||
Link: sl,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
|
|
||||||
switch oct {
|
|
||||||
case trace.LinkTypeChild:
|
|
||||||
return tracepb.Span_Link_CHILD_LINKED_SPAN
|
|
||||||
case trace.LinkTypeParent:
|
|
||||||
return tracepb.Span_Link_PARENT_LINKED_SPAN
|
|
||||||
default:
|
|
||||||
return tracepb.Span_Link_TYPE_UNSPECIFIED
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
|
|
||||||
if len(attrs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
outMap := make(map[string]*tracepb.AttributeValue)
|
|
||||||
for k, v := range attrs {
|
|
||||||
switch v := v.(type) {
|
|
||||||
case bool:
|
|
||||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
|
|
||||||
|
|
||||||
case int:
|
|
||||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
|
|
||||||
|
|
||||||
case int64:
|
|
||||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
|
|
||||||
|
|
||||||
case string:
|
|
||||||
outMap[k] = &tracepb.AttributeValue{
|
|
||||||
Value: &tracepb.AttributeValue_StringValue{
|
|
||||||
StringValue: &tracepb.TruncatableString{Value: v},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &tracepb.Span_Attributes{
|
|
||||||
AttributeMap: outMap,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This code is mostly copied from
|
|
||||||
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
|
|
||||||
func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
|
|
||||||
if len(as) == 0 && len(es) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
timeEvents := &tracepb.Span_TimeEvents{}
|
|
||||||
var annotations, droppedAnnotationsCount int
|
|
||||||
var messageEvents, droppedMessageEventsCount int
|
|
||||||
|
|
||||||
// Transform annotations
|
|
||||||
for i, a := range as {
|
|
||||||
if annotations >= maxAnnotationEventsPerSpan {
|
|
||||||
droppedAnnotationsCount = len(as) - i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
annotations++
|
|
||||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
|
||||||
&tracepb.Span_TimeEvent{
|
|
||||||
Time: timeToTimestamp(a.Time),
|
|
||||||
Value: transformAnnotationToTimeEvent(&a),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Transform message events
|
|
||||||
for i, e := range es {
|
|
||||||
if messageEvents >= maxMessageEventsPerSpan {
|
|
||||||
droppedMessageEventsCount = len(es) - i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
messageEvents++
|
|
||||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
|
||||||
&tracepb.Span_TimeEvent{
|
|
||||||
Time: timeToTimestamp(e.Time),
|
|
||||||
Value: transformMessageEventToTimeEvent(&e),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process dropped counter
|
|
||||||
timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
|
|
||||||
timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
|
|
||||||
|
|
||||||
return timeEvents
|
|
||||||
}
|
|
||||||
|
|
||||||
func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
|
|
||||||
return &tracepb.Span_TimeEvent_Annotation_{
|
|
||||||
Annotation: &tracepb.Span_TimeEvent_Annotation{
|
|
||||||
Description: &tracepb.TruncatableString{Value: a.Message},
|
|
||||||
Attributes: ocAttributesToProtoAttributes(a.Attributes),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
|
|
||||||
return &tracepb.Span_TimeEvent_MessageEvent_{
|
|
||||||
MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
|
|
||||||
Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
|
|
||||||
Id: uint64(e.MessageID),
|
|
||||||
UncompressedSize: uint64(e.UncompressedByteSize),
|
|
||||||
CompressedSize: uint64(e.CompressedByteSize),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// clip32 clips an int to the range of an int32.
|
|
||||||
func clip32(x int) int32 {
|
|
||||||
if x < math.MinInt32 {
|
|
||||||
return math.MinInt32
|
|
||||||
}
|
|
||||||
if x > math.MaxInt32 {
|
|
||||||
return math.MaxInt32
|
|
||||||
}
|
|
||||||
return int32(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
|
|
||||||
nanoTime := t.UnixNano()
|
|
||||||
return ×tamp.Timestamp{
|
|
||||||
Seconds: nanoTime / 1e9,
|
|
||||||
Nanos: int32(nanoTime % 1e9),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
|
|
||||||
switch kind {
|
|
||||||
case trace.SpanKindClient:
|
|
||||||
return tracepb.Span_CLIENT
|
|
||||||
case trace.SpanKindServer:
|
|
||||||
return tracepb.Span_SERVER
|
|
||||||
default:
|
|
||||||
return tracepb.Span_SPAN_KIND_UNSPECIFIED
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
|
|
||||||
if ts == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &tracepb.Span_Tracestate{
|
|
||||||
Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
|
|
||||||
protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
|
|
||||||
for _, entry := range entries {
|
|
||||||
protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
|
|
||||||
Key: entry.Key,
|
|
||||||
Value: entry.Value,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return protoEntries
|
|
||||||
}
|
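timeToTimestamp above simply splits UnixNano into whole seconds and the nanosecond remainder. A standalone sketch of that arithmetic using only the standard library (the sample instant is arbitrary):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirror the seconds/nanos split performed by timeToTimestamp.
	t := time.Date(2019, 11, 1, 12, 0, 0, 500000000, time.UTC)
	nano := t.UnixNano()
	fmt.Println(nano/1e9, nano%1e9) // 1572609600 500000000
}
```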
274	vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go (generated, vendored)
@@ -1,274 +0,0 @@
|
||||||
// Copyright 2018, OpenCensus Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package ocagent
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"go.opencensus.io/stats"
|
|
||||||
"go.opencensus.io/stats/view"
|
|
||||||
"go.opencensus.io/tag"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/ptypes/timestamp"
|
|
||||||
|
|
||||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errNilMeasure = errors.New("expecting a non-nil stats.Measure")
|
|
||||||
errNilView = errors.New("expecting a non-nil view.View")
|
|
||||||
errNilViewData = errors.New("expecting a non-nil view.Data")
|
|
||||||
)
|
|
||||||
|
|
||||||
func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
|
|
||||||
if vd == nil {
|
|
||||||
return nil, errNilViewData
|
|
||||||
}
|
|
||||||
|
|
||||||
descriptor, err := viewToMetricDescriptor(vd.View)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
timeseries, err := viewDataToTimeseries(vd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
metric := &metricspb.Metric{
|
|
||||||
MetricDescriptor: descriptor,
|
|
||||||
Timeseries: timeseries,
|
|
||||||
}
|
|
||||||
return metric, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
|
|
||||||
if v == nil {
|
|
||||||
return nil, errNilView
|
|
||||||
}
|
|
||||||
if v.Measure == nil {
|
|
||||||
return nil, errNilMeasure
|
|
||||||
}
|
|
||||||
|
|
||||||
desc := &metricspb.MetricDescriptor{
|
|
||||||
Name: stringOrCall(v.Name, v.Measure.Name),
|
|
||||||
Description: stringOrCall(v.Description, v.Measure.Description),
|
|
||||||
Unit: v.Measure.Unit(),
|
|
||||||
Type: aggregationToMetricDescriptorType(v),
|
|
||||||
LabelKeys: tagKeysToLabelKeys(v.TagKeys),
|
|
||||||
}
|
|
||||||
return desc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func stringOrCall(first string, call func() string) string {
|
|
||||||
if first != "" {
|
|
||||||
return first
|
|
||||||
}
|
|
||||||
return call()
|
|
||||||
}
|
|
||||||
|
|
||||||
type measureType uint
|
|
||||||
|
|
||||||
const (
|
|
||||||
measureUnknown measureType = iota
|
|
||||||
measureInt64
|
|
||||||
measureFloat64
|
|
||||||
)
|
|
||||||
|
|
||||||
func measureTypeFromMeasure(m stats.Measure) measureType {
|
|
||||||
switch m.(type) {
|
|
||||||
default:
|
|
||||||
return measureUnknown
|
|
||||||
case *stats.Float64Measure:
|
|
||||||
return measureFloat64
|
|
||||||
case *stats.Int64Measure:
|
|
||||||
return measureInt64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
|
|
||||||
if v == nil || v.Aggregation == nil {
|
|
||||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
|
||||||
}
|
|
||||||
if v.Measure == nil {
|
|
||||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
|
||||||
}
|
|
||||||
|
|
||||||
switch v.Aggregation.Type {
|
|
||||||
case view.AggTypeCount:
|
|
||||||
// Cumulative on int64
|
|
||||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
|
||||||
|
|
||||||
case view.AggTypeDistribution:
|
|
||||||
// Cumulative types
|
|
||||||
return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
|
|
||||||
|
|
||||||
case view.AggTypeLastValue:
|
|
||||||
// Gauge types
|
|
||||||
switch measureTypeFromMeasure(v.Measure) {
|
|
||||||
case measureFloat64:
|
|
||||||
return metricspb.MetricDescriptor_GAUGE_DOUBLE
|
|
||||||
case measureInt64:
|
|
||||||
return metricspb.MetricDescriptor_GAUGE_INT64
|
|
||||||
}
|
|
||||||
|
|
||||||
case view.AggTypeSum:
|
|
||||||
// Cumulative types
|
|
||||||
switch measureTypeFromMeasure(v.Measure) {
|
|
||||||
case measureFloat64:
|
|
||||||
return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
|
|
||||||
case measureInt64:
|
|
||||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// For all other cases, return unspecified.
|
|
||||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
|
||||||
}
|
|
||||||
|
|
||||||
func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
|
|
||||||
labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
|
|
||||||
for _, tagKey := range tagKeys {
|
|
||||||
labelKeys = append(labelKeys, &metricspb.LabelKey{
|
|
||||||
Key: tagKey.Name(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return labelKeys
|
|
||||||
}
|
|
||||||
|
|
||||||
func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
|
|
||||||
if vd == nil || len(vd.Rows) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Given that view.Data only contains Start, End
|
|
||||||
// the timestamps for all the row data will be the exact same
|
|
||||||
// per aggregation. However, the values will differ.
|
|
||||||
// Each row has its own tags.
|
|
||||||
startTimestamp := timeToProtoTimestamp(vd.Start)
|
|
||||||
endTimestamp := timeToProtoTimestamp(vd.End)
|
|
||||||
|
|
||||||
mType := measureTypeFromMeasure(vd.View.Measure)
|
|
||||||
timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
|
|
||||||
// It is imperative that the ordering of "LabelValues" matches those
|
|
||||||
// of the Label keys in the metric descriptor.
|
|
||||||
for _, row := range vd.Rows {
|
|
||||||
labelValues := labelValuesFromTags(row.Tags)
|
|
||||||
point := rowToPoint(vd.View, row, endTimestamp, mType)
|
|
||||||
timeseries = append(timeseries, &metricspb.TimeSeries{
|
|
||||||
StartTimestamp: startTimestamp,
|
|
||||||
LabelValues: labelValues,
|
|
||||||
Points: []*metricspb.Point{point},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(timeseries) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return timeseries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
|
|
||||||
unixNano := t.UnixNano()
|
|
||||||
return ×tamp.Timestamp{
|
|
||||||
Seconds: int64(unixNano / 1e9),
|
|
||||||
Nanos: int32(unixNano % 1e9),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
|
|
||||||
pt := &metricspb.Point{
|
|
||||||
Timestamp: endTimestamp,
|
|
||||||
}
|
|
||||||
|
|
||||||
switch data := row.Data.(type) {
|
|
||||||
case *view.CountData:
|
|
||||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
|
|
||||||
|
|
||||||
case *view.DistributionData:
|
|
||||||
pt.Value = &metricspb.Point_DistributionValue{
|
|
||||||
DistributionValue: &metricspb.DistributionValue{
|
|
||||||
Count: data.Count,
|
|
||||||
Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
|
|
||||||
// TODO: Add Exemplar
|
|
||||||
Buckets: bucketsToProtoBuckets(data.CountPerBucket),
|
|
||||||
BucketOptions: &metricspb.DistributionValue_BucketOptions{
|
|
||||||
Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
|
|
||||||
Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
|
|
||||||
Bounds: v.Aggregation.Buckets,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SumOfSquaredDeviation: data.SumOfSquaredDev,
|
|
||||||
}}
|
|
||||||
|
|
||||||
case *view.LastValueData:
|
|
||||||
setPointValue(pt, data.Value, mType)
|
|
||||||
|
|
||||||
case *view.SumData:
|
|
||||||
setPointValue(pt, data.Value, mType)
|
|
||||||
}
|
|
||||||
|
|
||||||
return pt
|
|
||||||
}
|
|
||||||
|
|
||||||
// Not returning anything from this function because metricspb.Point.is_Value is an unexported
|
|
||||||
// interface hence we just have to set its value by pointer.
|
|
||||||
func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
|
|
||||||
if mType == measureInt64 {
|
|
||||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
|
|
||||||
} else {
|
|
||||||
pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
|
|
||||||
distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
|
|
||||||
for i := 0; i < len(countPerBucket); i++ {
|
|
||||||
count := countPerBucket[i]
|
|
||||||
|
|
||||||
distBuckets[i] = &metricspb.DistributionValue_Bucket{
|
|
||||||
Count: count,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return distBuckets
|
|
||||||
}
|
|
||||||
|
|
||||||
func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
|
|
||||||
if len(tags) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
labelValues := make([]*metricspb.LabelValue, 0, len(tags))
|
|
||||||
for _, tag_ := range tags {
|
|
||||||
labelValues = append(labelValues, &metricspb.LabelValue{
|
|
||||||
Value: tag_.Value,
|
|
||||||
|
|
||||||
// It is imperative that we set the "HasValue" attribute,
|
|
||||||
// in order to distinguish missing a label from the empty string.
|
|
||||||
// https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
|
|
||||||
//
|
|
||||||
// OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
|
|
||||||
// so the best case that we can use to distinguish missing labels/tags from the
|
|
||||||
// empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
|
|
||||||
// a value.
|
|
||||||
HasValue: tag_.Key.Name() != "",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return labelValues
|
|
||||||
}
|
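viewDataToMetric and aggregationToMetricDescriptorType above derive the proto metric type from the view's aggregation; a Count aggregation, for instance, becomes CUMULATIVE_INT64. A small sketch of registering such a view with OpenCensus-Go (the measure and view names are illustrative placeholders):

```go
package main

import (
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// A Count-aggregated view like this one is translated by the code above
// into a MetricDescriptor of type CUMULATIVE_INT64.
var (
	mLatency = stats.Float64("repo/op_latency", "operation latency", stats.UnitMilliseconds)
	opCount  = &view.View{
		Name:        "repo/op_count",
		Description: "number of operations",
		Measure:     mLatency,
		Aggregation: view.Count(),
	}
)

func main() {
	if err := view.Register(opCount); err != nil {
		log.Fatal(err)
	}
}
```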
17	vendor/contrib.go.opencensus.io/exporter/ocagent/version.go (generated, vendored)
@@ -1,17 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ocagent
-
-const Version = "0.0.1"
@@ -1,3 +1,4 @@
+
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
@@ -175,18 +176,7 @@
 
                                END OF TERMS AND CONDITIONS
 
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2015 Microsoft Corporation
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
16	vendor/github.com/Azure/go-autorest/autorest/adal/README.md (generated, vendored)
@@ -135,7 +135,7 @@ resource := "https://management.core.windows.net/"
 applicationSecret := "APPLICATION_SECRET"
 
 spt, err := adal.NewServicePrincipalToken(
-	oauthConfig,
+	*oauthConfig,
 	appliationID,
 	applicationSecret,
 	resource,
@@ -170,7 +170,7 @@ if err != nil {
 }
 
 spt, err := adal.NewServicePrincipalTokenFromCertificate(
-	oauthConfig,
+	*oauthConfig,
 	applicationID,
 	certificate,
 	rsaPrivateKey,
@@ -195,7 +195,7 @@ oauthClient := &http.Client{}
 // Acquire the device code
 deviceCode, err := adal.InitiateDeviceAuth(
 	oauthClient,
-	oauthConfig,
+	*oauthConfig,
 	applicationID,
 	resource)
 if err != nil {
@@ -212,7 +212,7 @@ if err != nil {
 }
 
 spt, err := adal.NewServicePrincipalTokenFromManualToken(
-	oauthConfig,
+	*oauthConfig,
 	applicationID,
 	resource,
 	*token,
@@ -227,7 +227,7 @@ if (err == nil) {
 
 ```Go
 spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
-	oauthConfig,
+	*oauthConfig,
 	applicationID,
 	username,
 	password,
@@ -243,11 +243,11 @@ if (err == nil) {
 
 ``` Go
 spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
-	oauthConfig,
+	*oauthConfig,
 	applicationID,
 	clientSecret,
 	authorizationCode,
 	redirectURI,
 	resource,
 	callbacks...)
 
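Every example in the README now dereferences the *OAuthConfig returned by adal.NewOAuthConfig. A self-contained sketch of the updated client-secret flow; the endpoint, tenant, and application values are placeholders, not real credentials:

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant/application values; substitute real ones in practice.
	oauthConfig, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
	if err != nil {
		panic(err)
	}
	spt, err := adal.NewServicePrincipalToken(
		*oauthConfig, // note the dereference required by the updated examples
		"APPLICATION_ID",
		"APPLICATION_SECRET",
		"https://management.core.windows.net/",
	)
	if err != nil {
		panic(err)
	}
	if err := spt.Refresh(); err != nil {
		panic(err)
	}
	fmt.Println(len(spt.OAuthToken()) > 0)
}
```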
62	vendor/github.com/Azure/go-autorest/autorest/adal/config.go (generated, vendored)
@@ -15,10 +15,15 @@ package adal
 // limitations under the License.
 
 import (
+	"errors"
 	"fmt"
 	"net/url"
 )
 
+const (
+	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
 // OAuthConfig represents the endpoints needed
 // in OAuth operations
 type OAuthConfig struct {
@@ -60,7 +65,6 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
 		}
 		api = fmt.Sprintf("?api-version=%s", *apiVersion)
 	}
-	const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
 	u, err := url.Parse(activeDirectoryEndpoint)
 	if err != nil {
 		return nil, err
@@ -89,3 +93,59 @@ func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiV
 		DeviceCodeEndpoint: *deviceCodeURL,
 	}, nil
 }
+
+// MultiTenantOAuthConfig provides endpoints for primary and aulixiary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		return fmt.Sprintf("?api-version=%s", c.APIVersion)
+	}
+	return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that support multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+	if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+		return nil, errors.New("must specify one to three auxiliary tenants")
+	}
+	mtCfg := multiTenantOAuthConfig{
+		cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+	}
+	apiVer := options.apiVersion()
+	pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+	}
+	mtCfg.cfgs[0] = pri
+	for i := range auxiliaryTenantIDs {
+		aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
+		if err != nil {
+			return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+		}
+		mtCfg.cfgs[i+1] = aux
+	}
+	return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+	// first config in the slice is the primary tenant
+	cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+	return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+	return m.cfgs[1:]
+}
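The NewMultiTenantOAuthConfig helper added above accepts a primary tenant plus one to three auxiliary tenants. A minimal sketch of how a caller might construct one (the tenant IDs are placeholder assumptions):

```go
package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Placeholder tenant IDs; the endpoint is the public Azure AD endpoint.
	cfg, err := adal.NewMultiTenantOAuthConfig(
		"https://login.microsoftonline.com/",
		"PRIMARY_TENANT_ID",
		[]string{"AUX_TENANT_1", "AUX_TENANT_2"},
		adal.OAuthOptions{},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.PrimaryTenant().AuthorizeEndpoint.String())
	fmt.Println(len(cfg.AuxiliaryTenants())) // 2
}
```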
11
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
Normal file
11
vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
module github.com/Azure/go-autorest/autorest/adal
|
||||||
|
|
||||||
|
go 1.12
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/Azure/go-autorest/autorest/date v0.1.0
|
||||||
|
github.com/Azure/go-autorest/autorest/mocks v0.1.0
|
||||||
|
github.com/Azure/go-autorest/tracing v0.5.0
|
||||||
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||||
|
)
|
12	vendor/github.com/Azure/go-autorest/autorest/adal/go.sum (generated, vendored, new file)
@@ -0,0 +1,12 @@
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
37	vendor/github.com/Azure/go-autorest/autorest/adal/sender.go (generated, vendored)
@@ -15,7 +15,12 @@ package adal
 // limitations under the License.
 
 import (
+	"crypto/tls"
 	"net/http"
+	"net/http/cookiejar"
+	"sync"
+
+	"github.com/Azure/go-autorest/tracing"
 )
 
 const (
@@ -23,6 +28,9 @@ const (
 	mimeTypeFormPost = "application/x-www-form-urlencoded"
 )
 
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
 // Sender is the interface that wraps the Do method to send HTTP requests.
 //
 // The standard http.Client conforms to this interface.
@@ -45,7 +53,7 @@ type SendDecorator func(Sender) Sender
 
 // CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
 func CreateSender(decorators ...SendDecorator) Sender {
-	return DecorateSender(&http.Client{}, decorators...)
+	return DecorateSender(sender(), decorators...)
 }
 
 // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -58,3 +66,30 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
 	}
 	return s
 }
+
+func sender() Sender {
+	// note that we can't init defaultSender in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenderInit.Do(func() {
+		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+		defaultTransport := http.DefaultTransport.(*http.Transport)
+		transport := &http.Transport{
+			Proxy:                 defaultTransport.Proxy,
+			DialContext:           defaultTransport.DialContext,
+			MaxIdleConns:          defaultTransport.MaxIdleConns,
+			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+			TLSClientConfig: &tls.Config{
+				MinVersion: tls.VersionTLS12,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSender
+}
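sender() above builds a process-wide default client exactly once, copying http.DefaultTransport but enforcing TLS 1.2. A standalone sketch of the same sync.Once pattern using only the standard library (the function and variable names here are illustrative, not part of adal):

```go
package main

import (
	"crypto/tls"
	"net/http"
	"sync"
)

var (
	defaultClient     *http.Client
	defaultClientOnce sync.Once
)

// defaultHTTPClient returns a lazily-initialized client that mirrors
// http.DefaultTransport but enforces TLS 1.2 as the minimum version.
// Transport.Clone requires Go 1.13+, matching the Go bump in this commit.
func defaultHTTPClient() *http.Client {
	defaultClientOnce.Do(func() {
		base := http.DefaultTransport.(*http.Transport).Clone()
		base.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12}
		defaultClient = &http.Client{Transport: base}
	})
	return defaultClient
}

func main() {
	_ = defaultHTTPClient() // first call builds the client; later calls reuse it
}
```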
103  vendor/github.com/Azure/go-autorest/autorest/adal/token.go  (generated, vendored)
@@ -34,7 +34,6 @@ import (
 	"time"
 
 	"github.com/Azure/go-autorest/autorest/date"
-	"github.com/Azure/go-autorest/tracing"
 	"github.com/dgrijalva/jwt-go"
 )
 
@@ -71,6 +70,12 @@ type OAuthTokenProvider interface {
 	OAuthToken() string
 }
 
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
 // TokenRefreshError is an interface used by errors returned during token refresh.
 type TokenRefreshError interface {
 	error
@@ -390,7 +395,7 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
 		spt.refreshLock = &sync.RWMutex{}
 	}
 	if spt.sender == nil {
-		spt.sender = &http.Client{Transport: tracing.Transport}
+		spt.sender = sender()
 	}
 	return nil
 }
@@ -438,7 +443,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
 			RefreshWithin: defaultRefresh,
 		},
 		refreshLock:      &sync.RWMutex{},
-		sender:           &http.Client{Transport: tracing.Transport},
+		sender:           sender(),
 		refreshCallbacks: callbacks,
 	}
 	return spt, nil
@@ -679,7 +684,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
 			RefreshWithin: defaultRefresh,
 		},
 		refreshLock:           &sync.RWMutex{},
-		sender:                &http.Client{Transport: tracing.Transport},
+		sender:                sender(),
 		refreshCallbacks:      callbacks,
 		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
 	}
@@ -983,3 +988,93 @@ func (spt *ServicePrincipalToken) Token() Token {
 	defer spt.refreshLock.RUnlock()
 	return spt.inner.Token
 }
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+	PrimaryToken    *ServicePrincipalToken
+	AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+	return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+	tokens := make([]string, len(mt.AuxiliaryTokens))
+	for i := range mt.AuxiliaryTokens {
+		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+	}
+	return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and autoRefresh flag is on.  This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+		return fmt.Errorf("failed to refresh primary token: %v", err)
+	}
+	for _, aux := range mt.AuxiliaryTokens {
+		if err := aux.EnsureFreshWithContext(ctx); err != nil {
+			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+		}
+	}
+	return nil
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
+		return fmt.Errorf("failed to refresh primary token: %v", err)
+	}
+	for _, aux := range mt.AuxiliaryTokens {
+		if err := aux.RefreshWithContext(ctx); err != nil {
+			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+		}
+	}
+	return nil
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
+		return fmt.Errorf("failed to refresh primary token: %v", err)
+	}
+	for _, aux := range mt.AuxiliaryTokens {
+		if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
+			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+		}
+	}
+	return nil
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	auxTenants := multiTenantCfg.AuxiliaryTenants()
+	m := MultiTenantServicePrincipalToken{
+		AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
+	}
+	primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
+	}
+	m.PrimaryToken = primary
+	for i := range auxTenants {
+		aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
+		}
+		m.AuxiliaryTokens[i] = aux
+	}
+	return &m, nil
+}
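The new MultiTenantServicePrincipalToken bundles one primary and up to three auxiliary service-principal tokens and refreshes them together. A hedged usage sketch against the API added above; how the adal.MultiTenantOAuthConfig is obtained is left out, and the helper name allOAuthTokens is invented for the example:

package example

import (
	"context"

	"github.com/Azure/go-autorest/autorest/adal"
)

// allOAuthTokens creates the multi-tenant token, refreshes the primary and
// auxiliary tokens, and returns them all as raw strings (primary first).
func allOAuthTokens(ctx context.Context, cfg adal.MultiTenantOAuthConfig, clientID, secret, resource string) ([]string, error) {
	mt, err := adal.NewMultiTenantServicePrincipalToken(cfg, clientID, secret, resource)
	if err != nil {
		return nil, err
	}
	// EnsureFreshWithContext refreshes the primary token and every auxiliary token.
	if err := mt.EnsureFreshWithContext(ctx); err != nil {
		return nil, err
	}
	return append([]string{mt.PrimaryOAuthToken()}, mt.AuxiliaryOAuthTokens()...), nil
}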
59  vendor/github.com/Azure/go-autorest/autorest/authorization.go  (generated, vendored)
@@ -15,6 +15,7 @@ package autorest
 // limitations under the License.
 
 import (
+	"crypto/tls"
 	"encoding/base64"
 	"fmt"
 	"net/http"
@@ -22,7 +23,6 @@ import (
 	"strings"
 
 	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/Azure/go-autorest/tracing"
 )
 
 const (
@@ -149,11 +149,11 @@ type BearerAuthorizerCallback struct {
 
 // NewBearerAuthorizerCallback creates a bearer authorization callback.  The callback
 // is invoked when the HTTP request is submitted.
-func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
-	if sender == nil {
-		sender = &http.Client{Transport: tracing.Transport}
+func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+	if s == nil {
+		s = sender(tls.RenegotiateNever)
 	}
-	return &BearerAuthorizerCallback{sender: sender, callback: callback}
+	return &BearerAuthorizerCallback{sender: s, callback: callback}
 }
 
 // WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
@@ -285,3 +285,52 @@ func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
 
 	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
 }
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return &multiTenantSPTAuthorizer{tp: tp}
+}
+
+type multiTenantSPTAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
+		})
+	}
+}
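The authorizer added above writes the primary bearer token into the Authorization header and the auxiliary tokens into x-ms-authorization-auxiliary, refreshing them first when the provider supports it. A sketch of how calling code might wire it into a request pipeline; prepareMultiTenantGet is an invented helper, not part of the library:

package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

// prepareMultiTenantGet returns a GET request carrying the primary token in
// Authorization and the auxiliary tokens in x-ms-authorization-auxiliary.
// Any MultitenantOAuthTokenProvider works, e.g. *adal.MultiTenantServicePrincipalToken.
func prepareMultiTenantGet(mt adal.MultitenantOAuthTokenProvider, url string) (*http.Request, error) {
	authorizer := autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mt)
	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(url),
		authorizer.WithAuthorization(),
	)
	return preparer.Prepare(&http.Request{})
}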
5  vendor/github.com/Azure/go-autorest/autorest/azure/async.go  (generated, vendored)
@@ -417,6 +417,11 @@ func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest
 	}
 
 	req = req.WithContext(ctx)
+	preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...)
+	req, err = preparer.Prepare(req)
+	if err != nil {
+		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request")
+	}
 	pt.resp, err = sender.Do(req)
 	if err != nil {
 		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
5
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
|
@ -45,6 +45,7 @@ type ResourceIdentifier struct {
|
||||||
Datalake string `json:"datalake"`
|
Datalake string `json:"datalake"`
|
||||||
Batch string `json:"batch"`
|
Batch string `json:"batch"`
|
||||||
OperationalInsights string `json:"operationalInsights"`
|
OperationalInsights string `json:"operationalInsights"`
|
||||||
|
Storage string `json:"storage"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
// Environment represents a set of endpoints for each of Azure's Clouds.
|
||||||
|
@ -103,6 +104,7 @@ var (
|
||||||
Datalake: "https://datalake.azure.net/",
|
Datalake: "https://datalake.azure.net/",
|
||||||
Batch: "https://batch.core.windows.net/",
|
Batch: "https://batch.core.windows.net/",
|
||||||
OperationalInsights: "https://api.loganalytics.io",
|
OperationalInsights: "https://api.loganalytics.io",
|
||||||
|
Storage: "https://storage.azure.com/",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -135,6 +137,7 @@ var (
|
||||||
Datalake: NotAvailable,
|
Datalake: NotAvailable,
|
||||||
Batch: "https://batch.core.usgovcloudapi.net/",
|
Batch: "https://batch.core.usgovcloudapi.net/",
|
||||||
OperationalInsights: "https://api.loganalytics.us",
|
OperationalInsights: "https://api.loganalytics.us",
|
||||||
|
Storage: "https://storage.azure.com/",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -167,6 +170,7 @@ var (
|
||||||
Datalake: NotAvailable,
|
Datalake: NotAvailable,
|
||||||
Batch: "https://batch.chinacloudapi.cn/",
|
Batch: "https://batch.chinacloudapi.cn/",
|
||||||
OperationalInsights: NotAvailable,
|
OperationalInsights: NotAvailable,
|
||||||
|
Storage: "https://storage.azure.com/",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -199,6 +203,7 @@ var (
|
||||||
Datalake: NotAvailable,
|
Datalake: NotAvailable,
|
||||||
Batch: "https://batch.cloudapi.de/",
|
Batch: "https://batch.cloudapi.de/",
|
||||||
OperationalInsights: NotAvailable,
|
OperationalInsights: NotAvailable,
|
||||||
|
Storage: "https://storage.azure.com/",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
42  vendor/github.com/Azure/go-autorest/autorest/client.go  (generated, vendored)
@@ -22,12 +22,10 @@ import (
 	"io/ioutil"
 	"log"
 	"net/http"
-	"net/http/cookiejar"
 	"strings"
 	"time"
 
 	"github.com/Azure/go-autorest/logger"
-	"github.com/Azure/go-autorest/tracing"
 )
 
 const (
@@ -73,6 +71,22 @@ type Response struct {
 	*http.Response `json:"-"`
 }
 
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or not status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
 // LoggingInspector implements request and response inspectors that log the full request and
 // response to a supplied log.
 type LoggingInspector struct {
@@ -248,30 +262,8 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
 // sender returns the Sender to which to send requests.
 func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
 	if c.Sender == nil {
-		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
-		var defaultTransport = http.DefaultTransport.(*http.Transport)
-		transport := tracing.Transport
-		// for non-default values of TLS renegotiation create a new tracing transport.
-		// updating tracing.Transport affects all clients which is not what we want.
-		if renengotiation != tls.RenegotiateNever {
-			transport = tracing.NewTransport()
-		}
-		transport.Base = &http.Transport{
-			Proxy:                 defaultTransport.Proxy,
-			DialContext:           defaultTransport.DialContext,
-			MaxIdleConns:          defaultTransport.MaxIdleConns,
-			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
-			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
-			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
-			TLSClientConfig: &tls.Config{
-				MinVersion:    tls.VersionTLS12,
-				Renegotiation: renengotiation,
-			},
-		}
-		j, _ := cookiejar.New(nil)
-		return &http.Client{Jar: j, Transport: transport}
+		return sender(renengotiation)
 	}
 	return c.Sender
 }
 
@@ -176,18 +176,7 @@
 
    END OF TERMS AND CONDITIONS
 
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2015 Microsoft Corporation
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
3  vendor/github.com/Azure/go-autorest/autorest/date/go.mod  (generated, vendored, new file)
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/date
+
+go 1.12
11  vendor/github.com/Azure/go-autorest/autorest/go.mod  (generated, vendored, new file)
@@ -0,0 +1,11 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+	github.com/Azure/go-autorest/autorest/adal v0.5.0
+	github.com/Azure/go-autorest/autorest/mocks v0.2.0
+	github.com/Azure/go-autorest/logger v0.1.0
+	github.com/Azure/go-autorest/tracing v0.5.0
+	golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
18  vendor/github.com/Azure/go-autorest/autorest/go.sum  (generated, vendored, new file)
@@ -0,0 +1,18 @@
+github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
88  vendor/github.com/Azure/go-autorest/autorest/preparer.go  (generated, vendored)
@@ -16,7 +16,9 @@ package autorest
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
+	"encoding/xml"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -31,11 +33,33 @@ const (
 	mimeTypeOctetStream = "application/octet-stream"
 	mimeTypeFormPost    = "application/x-www-form-urlencoded"
 
 	headerAuthorization = "Authorization"
-	headerContentType   = "Content-Type"
-	headerUserAgent     = "User-Agent"
+	headerAuxAuthorization = "x-ms-authorization-auxiliary"
+	headerContentType      = "Content-Type"
+	headerUserAgent        = "User-Agent"
 )
 
+// used as a key type in context.WithValue()
+type ctxPrepareDecorators struct{}
+
+// WithPrepareDecorators adds the specified PrepareDecorators to the provided context.
+// If no PrepareDecorators are provided the context is unchanged.
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context {
+	if len(prepareDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator)
+}
+
+// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators.
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+	inCtx := ctx.Value(ctxPrepareDecorators{})
+	if pd, ok := inCtx.([]PrepareDecorator); ok {
+		return pd
+	}
+	return defaultPrepareDecorators
+}
+
 // Preparer is the interface that wraps the Prepare method.
 //
 // Prepare accepts and possibly modifies an http.Request (e.g., adding Headers).  Implementations
@@ -190,6 +214,9 @@ func AsGet() PrepareDecorator { return WithMethod("GET") }
 // AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
 func AsHead() PrepareDecorator { return WithMethod("HEAD") }
 
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
 // AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
 func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
 
@@ -225,6 +252,25 @@ func WithBaseURL(baseURL string) PrepareDecorator {
 	}
 }
 
+// WithBytes returns a PrepareDecorator that takes a list of bytes
+// which passes the bytes directly to the body
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
 // WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
 // request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
 func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
@@ -377,6 +423,28 @@ func WithJSON(v interface{}) PrepareDecorator {
 	}
 }
 
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := xml.Marshal(v)
+				if err == nil {
+					// we have to tack on an XML header
+					withHeader := xml.Header + string(b)
+					bytesWithHeader := []byte(withHeader)
+
+					r.ContentLength = int64(len(bytesWithHeader))
+					r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
 // WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
 // is absolute (that is, it begins with a "/"), it replaces the existing path.
 func WithPath(path string) PrepareDecorator {
@@ -455,7 +523,7 @@ func parseURL(u *url.URL, path string) (*url.URL, error) {
 // WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters
 // given in the supplied map (i.e., key=value).
 func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
-	parameters := ensureValueStrings(queryParameters)
+	parameters := MapToValues(queryParameters)
 	return func(p Preparer) Preparer {
 		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
 			r, err := p.Prepare(r)
@@ -463,14 +531,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
 			if r.URL == nil {
 				return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
 			}
 
 			v := r.URL.Query()
 			for key, value := range parameters {
-				d, err := url.QueryUnescape(value)
-				if err != nil {
-					return r, err
+				for i := range value {
+					d, err := url.QueryUnescape(value[i])
+					if err != nil {
+						return r, err
+					}
+					value[i] = d
 				}
-				v.Add(key, d)
+				v[key] = value
 			}
 			r.URL.RawQuery = v.Encode()
 		}
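WithXML mirrors the existing WithJSON decorator: it marshals the value, prepends the standard XML header, and sets the body and Content-Length on the request. An illustrative sketch of building such a request; the widget type and newXMLRequest helper are invented for the example:

package example

import (
	"encoding/xml"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

type widget struct {
	XMLName xml.Name `xml:"widget"`
	Name    string   `xml:"name"`
}

// newXMLRequest prepares a PUT request whose body is the XML encoding of a widget.
func newXMLRequest(url string) (*http.Request, error) {
	return autorest.CreatePreparer(
		autorest.AsPut(),
		autorest.WithBaseURL(url),
		autorest.WithXML(widget{Name: "example"}),
	).Prepare(&http.Request{})
}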
19  vendor/github.com/Azure/go-autorest/autorest/responder.go  (generated, vendored)
@@ -153,6 +153,25 @@ func ByClosingIfError() RespondDecorator {
 	}
 }
 
+// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
 // ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
 // response Body into the value pointed to by v.
 func ByUnmarshallingJSON(v interface{}) RespondDecorator {
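ByUnmarshallingBytes is the raw-bytes counterpart of ByUnmarshallingJSON. A hedged sketch of a typical Send/Respond pipeline that uses it; fetchRaw is an invented helper:

package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// fetchRaw sends req through the given Sender and copies the response body into
// a byte slice instead of decoding it.
func fetchRaw(client autorest.Sender, req *http.Request) ([]byte, error) {
	resp, err := autorest.SendWithSender(client, req)
	if err != nil {
		return nil, err
	}
	var raw []byte
	err = autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingBytes(&raw),
		autorest.ByClosing(),
	)
	return raw, err
}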
159  vendor/github.com/Azure/go-autorest/autorest/sender.go  (generated, vendored)
@@ -15,16 +15,40 @@ package autorest
 // limitations under the License.
 
 import (
+	"context"
+	"crypto/tls"
 	"fmt"
 	"log"
 	"math"
 	"net/http"
+	"net/http/cookiejar"
 	"strconv"
 	"time"
 
 	"github.com/Azure/go-autorest/tracing"
 )
 
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
 // Sender is the interface that wraps the Do method to send HTTP requests.
 //
 // The standard http.Client conforms to this interface.
@@ -47,7 +71,7 @@ type SendDecorator func(Sender) Sender
 
 // CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
 func CreateSender(decorators ...SendDecorator) Sender {
-	return DecorateSender(&http.Client{}, decorators...)
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
 }
 
 // DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to
@@ -70,7 +94,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
 //
 // Send will not poll or retry requests.
 func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
-	return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
 }
 
 // SendWithSender sends the passed http.Request, through the provided Sender, returning the
@@ -82,6 +106,29 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
 	return DecorateSender(s, decorators...).Do(r)
 }
 
+func sender(renengotiation tls.RenegotiationSupport) Sender {
+	// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	transport := &http.Transport{
+		Proxy:                 defaultTransport.Proxy,
+		DialContext:           defaultTransport.DialContext,
+		MaxIdleConns:          defaultTransport.MaxIdleConns,
+		IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+		TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+		ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+		TLSClientConfig: &tls.Config{
+			MinVersion:    tls.VersionTLS12,
+			Renegotiation: renengotiation,
+		},
+	}
+	var roundTripper http.RoundTripper = transport
+	if tracing.IsEnabled() {
+		roundTripper = tracing.NewTransport(transport)
+	}
+	j, _ := cookiejar.New(nil)
+	return &http.Client{Jar: j, Transport: roundTripper}
+}
+
 // AfterDelay returns a SendDecorator that delays for the passed time.Duration before
 // invoking the Sender. The delay may be terminated by closing the optional channel on the
 // http.Request. If canceled, no further Senders are invoked.
@@ -211,53 +258,73 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
 
 // DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
 // number of attempts, exponentially backing off between requests using the supplied backoff
-// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
-// the http.Request.
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
 func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
 	return func(s Sender) Sender {
-		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
-			rr := NewRetriableRequest(r)
-			// Increment to add the first call (attempts denotes number of retries)
-			for attempt := 0; attempt < attempts+1; {
-				err = rr.Prepare()
-				if err != nil {
-					return resp, err
-				}
-				resp, err = s.Do(rr.Request())
-				// if the error isn't temporary don't bother retrying
-				if err != nil && !IsTemporaryNetworkError(err) {
-					return nil, err
-				}
-				// we want to retry if err is not nil (e.g. transient network failure).  note that for failed authentication
-				// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
-				if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
-					return resp, err
-				}
-				delayed := DelayWithRetryAfter(resp, r.Context().Done())
-				if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
-					return resp, r.Context().Err()
-				}
-				// don't count a 429 against the number of attempts
-				// so that we continue to retry until it succeeds
-				if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
-					attempt++
-				}
-			}
-			return resp, err
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
 		})
 	}
 }
 
-// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
-// responses with status code 429
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
+		})
+	}
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+	rr := NewRetriableRequest(r)
+	// Increment to add the first call (attempts denotes number of retries)
+	for attempt := 0; attempt < attempts+1; {
+		err = rr.Prepare()
+		if err != nil {
+			return
+		}
+		resp, err = s.Do(rr.Request())
+		// we want to retry if err is not nil (e.g. transient network failure).  note that for failed authentication
+		// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+		if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+			return resp, err
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration.  If there is
+// no Retry-After header or the wait is cancelled the return value is false.
 func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
 	if resp == nil {
 		return false
 	}
-	retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
-	if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
 		select {
-		case <-time.After(time.Duration(retryAfter) * time.Second):
+		case <-time.After(dur):
 			return true
 		case <-cancel:
 			return false
@@ -317,8 +384,22 @@ func WithLogging(logger *log.Logger) SendDecorator {
 // Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
 // count.
 func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay).  Backoff duration is in seconds and can set
+// to zero for no delay.  To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+	if cap > 0 && d > cap {
+		d = cap
+	}
 	select {
-	case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
+	case <-time.After(d):
 		return true
 	case <-cancel:
 		return false
vendor/github.com/Azure/go-autorest/autorest/version.go
generated
vendored
2
vendor/github.com/Azure/go-autorest/autorest/version.go
generated
vendored
|
@ -19,7 +19,7 @@ import (
|
||||||
"runtime"
|
"runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
const number = "v12.0.0"
|
const number = "v13.0.2"
|
||||||
|
|
||||||
var (
|
var (
|
||||||
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
||||||
|
|
191
vendor/github.com/Azure/go-autorest/logger/LICENSE
generated
vendored
Normal file
191
vendor/github.com/Azure/go-autorest/logger/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,191 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2015 Microsoft Corporation
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
3  vendor/github.com/Azure/go-autorest/logger/go.mod  (generated, vendored, new file)
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/logger
+
+go 1.12
191
vendor/github.com/Azure/go-autorest/tracing/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,191 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2015 Microsoft Corporation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
3
vendor/github.com/Azure/go-autorest/tracing/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/Azure/go-autorest/tracing

go 1.12
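Each of these split go-autorest submodules now carries its own module definition like the one above, so a consumer pins them independently in its own go.mod. The require block below is only a sketch; the version numbers are made up for illustration:

```
require (
	github.com/Azure/go-autorest/logger v1.0.0  // hypothetical version
	github.com/Azure/go-autorest/tracing v1.0.0 // hypothetical version
)
```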
186
vendor/github.com/Azure/go-autorest/tracing/tracing.go
generated
vendored
@@ -16,180 +16,52 @@ package tracing

 import (
 	"context"
-	"fmt"
 	"net/http"
-	"os"
-
-	"contrib.go.opencensus.io/exporter/ocagent"
-	"go.opencensus.io/plugin/ochttp"
-	"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
-	"go.opencensus.io/stats/view"
-	"go.opencensus.io/trace"
 )
 
+// Tracer represents an HTTP tracing facility.
+type Tracer interface {
+	NewTransport(base *http.Transport) http.RoundTripper
+	StartSpan(ctx context.Context, name string) context.Context
+	EndSpan(ctx context.Context, httpStatusCode int, err error)
+}
+
 var (
-	// Transport is the default tracing RoundTripper. The custom options setter will control
-	// if traces are being emitted or not.
-	Transport = NewTransport()
-
-	// enabled is the flag for marking if tracing is enabled.
-	enabled = false
-
-	// Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
-	// it will be using the parent sampler or the default.
-	sampler = trace.NeverSample()
-
-	// Views for metric instrumentation.
-	views = map[string]*view.View{}
-
-	// the trace exporter
-	traceExporter trace.Exporter
+	tracer Tracer
 )
 
-func init() {
-	enableFromEnv()
+// Register will register the provided Tracer. Pass nil to unregister a Tracer.
+func Register(t Tracer) {
+	tracer = t
 }
 
-func enableFromEnv() {
-	_, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
-	_, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
-	if ok || legacyOk {
-		agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
-
-		if ok {
-			EnableWithAIForwarding(agentEndpoint)
-		} else {
-			Enable()
-		}
-	}
-}
-
-// NewTransport returns a new instance of a tracing-aware RoundTripper.
-func NewTransport() *ochttp.Transport {
-	return &ochttp.Transport{
-		Propagation:     &tracecontext.HTTPFormat{},
-		GetStartOptions: getStartOptions,
-	}
-}
-
-// IsEnabled returns true if monitoring is enabled for the sdk.
+// IsEnabled returns true if a Tracer has been registered.
 func IsEnabled() bool {
-	return enabled
+	return tracer != nil
 }
 
-// Enable will start instrumentation for metrics and traces.
-func Enable() error {
-	enabled = true
-	sampler = nil
-
-	err := initStats()
-	return err
+// NewTransport creates a new instrumenting http.RoundTripper for the
+// registered Tracer. If no Tracer has been registered it returns nil.
+func NewTransport(base *http.Transport) http.RoundTripper {
+	if tracer != nil {
+		return tracer.NewTransport(base)
+	}
+	return nil
 }
 
-// Disable will disable instrumentation for metrics and traces.
-func Disable() {
-	disableStats()
-	sampler = trace.NeverSample()
-	if traceExporter != nil {
-		trace.UnregisterExporter(traceExporter)
-	}
-	enabled = false
-}
-
-// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
-// exporter making the metrics and traces available in app insights.
-func EnableWithAIForwarding(agentEndpoint string) (err error) {
-	err = Enable()
-	if err != nil {
-		return err
-	}
-
-	traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
-	if err != nil {
-		return err
-	}
-	trace.RegisterExporter(traceExporter)
-	return
-}
-
-// getStartOptions is the custom options setter for the ochttp package.
-func getStartOptions(*http.Request) trace.StartOptions {
-	return trace.StartOptions{
-		Sampler: sampler,
-	}
-}
-
-// initStats registers the views for the http metrics
-func initStats() (err error) {
-	clientViews := []*view.View{
-		ochttp.ClientCompletedCount,
-		ochttp.ClientRoundtripLatencyDistribution,
-		ochttp.ClientReceivedBytesDistribution,
-		ochttp.ClientSentBytesDistribution,
-	}
-	for _, cv := range clientViews {
-		vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
-		views[vn] = cv.WithName(vn)
-		err = view.Register(views[vn])
-		if err != nil {
-			return err
-		}
-	}
-	return
-}
-
-// disableStats will unregister the previously registered metrics
-func disableStats() {
-	for _, v := range views {
-		view.Unregister(v)
-	}
-}
-
-// StartSpan starts a trace span
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
 func StartSpan(ctx context.Context, name string) context.Context {
-	ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
+	if tracer != nil {
+		return tracer.StartSpan(ctx, name)
+	}
 	return ctx
 }
 
-// EndSpan ends a previously started span stored in the context
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
 func EndSpan(ctx context.Context, httpStatusCode int, err error) {
-	span := trace.FromContext(ctx)
-
-	if span == nil {
-		return
-	}
-
-	if err != nil {
-		span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
-	}
-	span.End()
-}
-
-// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
-// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
-func toTraceStatusCode(httpStatusCode int) int32 {
-	switch {
-	case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
-		return trace.StatusCodeOK
-	case httpStatusCode == http.StatusBadRequest:
-		return trace.StatusCodeInvalidArgument
-	case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
-		return trace.StatusCodeUnauthenticated
-	case httpStatusCode == http.StatusForbidden:
-		return trace.StatusCodePermissionDenied
-	case httpStatusCode == http.StatusNotFound:
-		return trace.StatusCodeNotFound
-	case httpStatusCode == http.StatusTooManyRequests:
-		return trace.StatusCodeResourceExhausted
-	case httpStatusCode == 499:
-		return trace.StatusCodeCancelled
-	case httpStatusCode == http.StatusNotImplemented:
-		return trace.StatusCodeUnimplemented
-	case httpStatusCode == http.StatusServiceUnavailable:
-		return trace.StatusCodeUnavailable
-	case httpStatusCode == http.StatusGatewayTimeout:
-		return trace.StatusCodeDeadlineExceeded
-	default:
-		return trace.StatusCodeUnknown
+	if tracer != nil {
+		tracer.EndSpan(ctx, httpStatusCode, err)
 	}
 }
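The rewritten tracing.go replaces the built-in OpenCensus wiring with a pluggable Tracer that callers register themselves. The following is a minimal sketch of how a consumer might hook into the new API; the noopTracer type is invented here for illustration, while Register, IsEnabled, StartSpan and EndSpan are the package-level functions shown in the diff above.

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// noopTracer satisfies tracing.Tracer without emitting any spans.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper {
	return base // no instrumentation, just pass the base transport through
}

func (noopTracer) StartSpan(ctx context.Context, name string) context.Context {
	return ctx // no span is created
}

func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {
	// nothing to end
}

func main() {
	tracing.Register(noopTracer{}) // package-level functions now delegate to it
	_ = tracing.IsEnabled()        // true once a Tracer is registered

	ctx := tracing.StartSpan(context.Background(), "example")
	tracing.EndSpan(ctx, http.StatusOK, nil)

	tracing.Register(nil) // passing nil unregisters the Tracer again
}
```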
1
vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
generated
vendored
@@ -1 +0,0 @@
Google Inc.
@ -1,356 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: opencensus/proto/agent/common/v1/common.proto
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
type LibraryInfo_Language int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
|
|
||||||
LibraryInfo_CPP LibraryInfo_Language = 1
|
|
||||||
LibraryInfo_C_SHARP LibraryInfo_Language = 2
|
|
||||||
LibraryInfo_ERLANG LibraryInfo_Language = 3
|
|
||||||
LibraryInfo_GO_LANG LibraryInfo_Language = 4
|
|
||||||
LibraryInfo_JAVA LibraryInfo_Language = 5
|
|
||||||
LibraryInfo_NODE_JS LibraryInfo_Language = 6
|
|
||||||
LibraryInfo_PHP LibraryInfo_Language = 7
|
|
||||||
LibraryInfo_PYTHON LibraryInfo_Language = 8
|
|
||||||
LibraryInfo_RUBY LibraryInfo_Language = 9
|
|
||||||
)
|
|
||||||
|
|
||||||
var LibraryInfo_Language_name = map[int32]string{
|
|
||||||
0: "LANGUAGE_UNSPECIFIED",
|
|
||||||
1: "CPP",
|
|
||||||
2: "C_SHARP",
|
|
||||||
3: "ERLANG",
|
|
||||||
4: "GO_LANG",
|
|
||||||
5: "JAVA",
|
|
||||||
6: "NODE_JS",
|
|
||||||
7: "PHP",
|
|
||||||
8: "PYTHON",
|
|
||||||
9: "RUBY",
|
|
||||||
}
|
|
||||||
|
|
||||||
var LibraryInfo_Language_value = map[string]int32{
|
|
||||||
"LANGUAGE_UNSPECIFIED": 0,
|
|
||||||
"CPP": 1,
|
|
||||||
"C_SHARP": 2,
|
|
||||||
"ERLANG": 3,
|
|
||||||
"GO_LANG": 4,
|
|
||||||
"JAVA": 5,
|
|
||||||
"NODE_JS": 6,
|
|
||||||
"PHP": 7,
|
|
||||||
"PYTHON": 8,
|
|
||||||
"RUBY": 9,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x LibraryInfo_Language) String() string {
|
|
||||||
return proto.EnumName(LibraryInfo_Language_name, int32(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_126c72ed8a252c84, []int{2, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Identifier metadata of the Node that produces the span or tracing data.
|
|
||||||
// Note, this is not the metadata about the Node or service that is described by associated spans.
|
|
||||||
// In the future we plan to extend the identifier proto definition to support
|
|
||||||
// additional information (e.g cloud id, etc.)
|
|
||||||
type Node struct {
|
|
||||||
// Identifier that uniquely identifies a process within a VM/container.
|
|
||||||
Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
|
|
||||||
// Information on the OpenCensus Library that initiates the stream.
|
|
||||||
LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
|
|
||||||
// Additional information on service.
|
|
||||||
ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
|
|
||||||
// Additional attributes.
|
|
||||||
Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Node) Reset() { *m = Node{} }
|
|
||||||
func (m *Node) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Node) ProtoMessage() {}
|
|
||||||
func (*Node) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_126c72ed8a252c84, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Node) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Node.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Node.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Node) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Node.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Node) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Node.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Node) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Node.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Node proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Node) GetIdentifier() *ProcessIdentifier {
|
|
||||||
if m != nil {
|
|
||||||
return m.Identifier
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Node) GetLibraryInfo() *LibraryInfo {
|
|
||||||
if m != nil {
|
|
||||||
return m.LibraryInfo
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Node) GetServiceInfo() *ServiceInfo {
|
|
||||||
if m != nil {
|
|
||||||
return m.ServiceInfo
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Node) GetAttributes() map[string]string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Attributes
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Identifier that uniquely identifies a process within a VM/container.
|
|
||||||
type ProcessIdentifier struct {
|
|
||||||
// The host name. Usually refers to the machine/container name.
|
|
||||||
// For example: os.Hostname() in Go, socket.gethostname() in Python.
|
|
||||||
HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
|
|
||||||
// Process id.
|
|
||||||
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
|
||||||
// Start time of this ProcessIdentifier. Represented in epoch time.
|
|
||||||
StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
|
|
||||||
func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ProcessIdentifier) ProtoMessage() {}
|
|
||||||
func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_126c72ed8a252c84, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ProcessIdentifier.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ProcessIdentifier) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ProcessIdentifier.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ProcessIdentifier) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ProcessIdentifier) GetHostName() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.HostName
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessIdentifier) GetPid() uint32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Pid
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
|
|
||||||
if m != nil {
|
|
||||||
return m.StartTimestamp
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Information on OpenCensus Library.
|
|
||||||
type LibraryInfo struct {
|
|
||||||
// Language of OpenCensus Library.
|
|
||||||
Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
|
|
||||||
// Version of Agent exporter of Library.
|
|
||||||
ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
|
|
||||||
// Version of OpenCensus Library.
|
|
||||||
CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
|
|
||||||
func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*LibraryInfo) ProtoMessage() {}
|
|
||||||
func (*LibraryInfo) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_126c72ed8a252c84, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *LibraryInfo) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_LibraryInfo.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *LibraryInfo) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_LibraryInfo.Size(m)
|
|
||||||
}
|
|
||||||
func (m *LibraryInfo) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
|
|
||||||
if m != nil {
|
|
||||||
return m.Language
|
|
||||||
}
|
|
||||||
return LibraryInfo_LANGUAGE_UNSPECIFIED
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LibraryInfo) GetExporterVersion() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.ExporterVersion
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *LibraryInfo) GetCoreLibraryVersion() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.CoreLibraryVersion
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Additional service information.
|
|
||||||
type ServiceInfo struct {
|
|
||||||
// Name of the service.
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
|
|
||||||
func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ServiceInfo) ProtoMessage() {}
|
|
||||||
func (*ServiceInfo) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_126c72ed8a252c84, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ServiceInfo) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ServiceInfo.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ServiceInfo) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ServiceInfo.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ServiceInfo) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ServiceInfo) GetName() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
|
|
||||||
proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
|
|
||||||
proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
|
|
||||||
proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
|
|
||||||
proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
|
|
||||||
proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_126c72ed8a252c84 = []byte{
|
|
||||||
// 590 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
|
|
||||||
0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
|
|
||||||
0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
|
|
||||||
0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
|
|
||||||
0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
|
|
||||||
0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
|
|
||||||
0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
|
|
||||||
0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
|
|
||||||
0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
|
|
||||||
0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
|
|
||||||
0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
|
|
||||||
0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
|
|
||||||
0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
|
|
||||||
0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
|
|
||||||
0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
|
|
||||||
0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
|
|
||||||
0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
|
|
||||||
0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
|
|
||||||
0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
|
|
||||||
0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
|
|
||||||
0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
|
|
||||||
0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
|
|
||||||
0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
|
|
||||||
0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
|
|
||||||
0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
|
|
||||||
0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
|
|
||||||
0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
|
|
||||||
0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
|
|
||||||
0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
|
|
||||||
0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
|
|
||||||
0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
|
|
||||||
0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
|
|
||||||
0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
|
|
||||||
0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
|
|
||||||
0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
|
|
||||||
0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
|
|
||||||
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
|
|
||||||
}
|
|
|
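The removed common.pb.go defined the Node, ProcessIdentifier, LibraryInfo and ServiceInfo messages that the OpenCensus agent exporters attach to every export stream. For reference, a small sketch of populating a Node with those generated types; the field and type names come from the code above, the import alias and all concrete values are made up for illustration:

```go
package main

import (
	"fmt"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
)

func main() {
	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName: "example-host", // hypothetical host name
			Pid:      4242,           // hypothetical process id
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:           commonpb.LibraryInfo_GO_LANG,
			CoreLibraryVersion: "0.0.0", // hypothetical version
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "example-service"},
	}
	fmt.Println(node.GetServiceInfo().GetName())
}
```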
@ -1,264 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
fmt "fmt"
|
|
||||||
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
|
||||||
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
|
||||||
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
type ExportMetricsServiceRequest struct {
|
|
||||||
// This is required only in the first message on the stream or if the
|
|
||||||
// previous sent ExportMetricsServiceRequest message has a different Node (e.g.
|
|
||||||
// when the same RPC is used to send Metrics from multiple Applications).
|
|
||||||
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
|
||||||
// A list of metrics that belong to the last received Node.
|
|
||||||
Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
|
|
||||||
// The resource for the metrics in this message that do not have an explicit
|
|
||||||
// resource set.
|
|
||||||
// If unset, the most recently set resource in the RPC stream applies. It is
|
|
||||||
// valid to never be set within a stream, e.g. when no resource info is known
|
|
||||||
// at all or when all sent metrics have an explicit resource set.
|
|
||||||
Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
|
|
||||||
func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ExportMetricsServiceRequest) ProtoMessage() {}
|
|
||||||
func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_47e253a956287d04, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceRequest) GetNode() *v1.Node {
|
|
||||||
if m != nil {
|
|
||||||
return m.Node
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
|
|
||||||
if m != nil {
|
|
||||||
return m.Metrics
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource {
|
|
||||||
if m != nil {
|
|
||||||
return m.Resource
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type ExportMetricsServiceResponse struct {
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
|
|
||||||
func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ExportMetricsServiceResponse) ProtoMessage() {}
|
|
||||||
func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_47e253a956287d04, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ExportMetricsServiceResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest")
|
|
||||||
proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_47e253a956287d04 = []byte{
|
|
||||||
// 340 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40,
|
|
||||||
0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45,
|
|
||||||
0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f,
|
|
||||||
0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24,
|
|
||||||
0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26,
|
|
||||||
0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5,
|
|
||||||
0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09,
|
|
||||||
0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54,
|
|
||||||
0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1,
|
|
||||||
0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4,
|
|
||||||
0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7,
|
|
||||||
0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c,
|
|
||||||
0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58,
|
|
||||||
0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6,
|
|
||||||
0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b,
|
|
||||||
0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb,
|
|
||||||
0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90,
|
|
||||||
0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a,
|
|
||||||
0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab,
|
|
||||||
0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa,
|
|
||||||
0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3,
|
|
||||||
0x1b, 0x03, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConn
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion4
|
|
||||||
|
|
||||||
// MetricsServiceClient is the client API for MetricsService service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type MetricsServiceClient interface {
|
|
||||||
// For performance reasons, it is recommended to keep this RPC
|
|
||||||
// alive for the entire life of the application.
|
|
||||||
Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type metricsServiceClient struct {
|
|
||||||
cc *grpc.ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
|
|
||||||
return &metricsServiceClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
|
|
||||||
stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
x := &metricsServiceExportClient{stream}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type MetricsService_ExportClient interface {
|
|
||||||
Send(*ExportMetricsServiceRequest) error
|
|
||||||
Recv() (*ExportMetricsServiceResponse, error)
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type metricsServiceExportClient struct {
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
|
|
||||||
return x.ClientStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
|
|
||||||
m := new(ExportMetricsServiceResponse)
|
|
||||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetricsServiceServer is the server API for MetricsService service.
|
|
||||||
type MetricsServiceServer interface {
|
|
||||||
// For performance reasons, it is recommended to keep this RPC
|
|
||||||
// alive for the entire life of the application.
|
|
||||||
Export(MetricsService_ExportServer) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
|
|
||||||
s.RegisterService(&_MetricsService_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
|
|
||||||
return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
|
|
||||||
}
|
|
||||||
|
|
||||||
type MetricsService_ExportServer interface {
|
|
||||||
Send(*ExportMetricsServiceResponse) error
|
|
||||||
Recv() (*ExportMetricsServiceRequest, error)
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type metricsServiceExportServer struct {
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
|
|
||||||
return x.ServerStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
|
|
||||||
m := new(ExportMetricsServiceRequest)
|
|
||||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _MetricsService_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
|
|
||||||
HandlerType: (*MetricsServiceServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{},
|
|
||||||
Streams: []grpc.StreamDesc{
|
|
||||||
{
|
|
||||||
StreamName: "Export",
|
|
||||||
Handler: _MetricsService_Export_Handler,
|
|
||||||
ServerStreams: true,
|
|
||||||
ClientStreams: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
|
|
||||||
}
|
|
|
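The removed metrics_service.pb.go exposed a bidirectional streaming Export RPC which, per its own comments, is meant to stay open for the life of the application. A hedged sketch of a client opening that stream follows; the agent address is hypothetical, the import path is the one the generated code in this commit assumes, and error handling is kept minimal on purpose:

```go
package main

import (
	"context"
	"log"

	metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical agent endpoint; insecure transport only to keep the sketch short.
	conn, err := grpc.Dial("localhost:55678", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := metricspb.NewMetricsServiceClient(conn)

	// Export is a bidirectional stream; it is opened once and reused for all batches.
	stream, err := client.Export(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The first message on the stream would normally carry the Node; later
	// messages may omit it. An empty request is sent here only as a placeholder.
	if err := stream.Send(&metricspb.ExportMetricsServiceRequest{}); err != nil {
		log.Fatal(err)
	}
}
```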
@ -1,443 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: opencensus/proto/agent/trace/v1/trace_service.proto
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
fmt "fmt"
|
|
||||||
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
|
||||||
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
|
||||||
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
type CurrentLibraryConfig struct {
|
|
||||||
// This is required only in the first message on the stream or if the
|
|
||||||
// previous sent CurrentLibraryConfig message has a different Node (e.g.
|
|
||||||
// when the same RPC is used to configure multiple Applications).
|
|
||||||
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
|
||||||
// Current configuration.
|
|
||||||
Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }
|
|
||||||
func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*CurrentLibraryConfig) ProtoMessage() {}
|
|
||||||
func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_7027f99caf7ac6a5, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *CurrentLibraryConfig) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_CurrentLibraryConfig.Size(m)
|
|
||||||
}
|
|
||||||
func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *CurrentLibraryConfig) GetNode() *v1.Node {
|
|
||||||
if m != nil {
|
|
||||||
return m.Node
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
|
|
||||||
if m != nil {
|
|
||||||
return m.Config
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type UpdatedLibraryConfig struct {
|
|
||||||
// This field is ignored when the RPC is used to configure only one Application.
|
|
||||||
// This is required only in the first message on the stream or if the
|
|
||||||
// previous sent UpdatedLibraryConfig message has a different Node.
|
|
||||||
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
|
||||||
// Requested updated configuration.
|
|
||||||
Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }
|
|
||||||
func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UpdatedLibraryConfig) ProtoMessage() {}
|
|
||||||
func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_7027f99caf7ac6a5, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UpdatedLibraryConfig) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
|
|
||||||
if m != nil {
|
|
||||||
return m.Node
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
|
|
||||||
if m != nil {
|
|
||||||
return m.Config
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type ExportTraceServiceRequest struct {
|
|
||||||
// This is required only in the first message on the stream or if the
|
|
||||||
// previous sent ExportTraceServiceRequest message has a different Node (e.g.
|
|
||||||
// when the same RPC is used to send Spans from multiple Applications).
|
|
||||||
Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
|
|
||||||
// A list of Spans that belong to the last received Node.
|
|
||||||
Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
|
|
||||||
// The resource for the spans in this message that do not have an explicit
|
|
||||||
// resource set.
|
|
||||||
// If unset, the most recently set resource in the RPC stream applies. It is
|
|
||||||
// valid to never be set within a stream, e.g. when no resource info is known.
|
|
||||||
Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
|
|
||||||
func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ExportTraceServiceRequest) ProtoMessage() {}
|
|
||||||
func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_7027f99caf7ac6a5, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
|
|
||||||
if m != nil {
|
|
||||||
return m.Node
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
|
|
||||||
if m != nil {
|
|
||||||
return m.Spans
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
|
|
||||||
if m != nil {
|
|
||||||
return m.Resource
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
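
The getters above follow the streaming contract described in the field comments: Node travels only on the first ExportTraceServiceRequest of a stream (or when it changes), and Resource may stay unset while the previously sent one still applies. A minimal sketch of how a caller could honor that contract, assuming a hypothetical helper name and placeholder span batches:

// buildExportRequests is an illustrative helper, not part of this vendored file.
// It shows the Node being attached only to the first request of an Export stream.
func buildExportRequests(node *v1.Node, batches [][]*v11.Span) []*ExportTraceServiceRequest {
	reqs := make([]*ExportTraceServiceRequest, 0, len(batches))
	for i, spans := range batches {
		req := &ExportTraceServiceRequest{Spans: spans}
		if i == 0 {
			req.Node = node // required only on the first message, or when the Node changes
		}
		reqs = append(reqs, req)
	}
	return reqs
}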
|
|
||||||
|
|
||||||
type ExportTraceServiceResponse struct {
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
|
|
||||||
func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ExportTraceServiceResponse) ProtoMessage() {}
|
|
||||||
func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_7027f99caf7ac6a5, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
|
|
||||||
proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
|
|
||||||
proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
|
|
||||||
proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_7027f99caf7ac6a5 = []byte{
|
|
||||||
// 423 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40,
|
|
||||||
0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a,
|
|
||||||
0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8,
|
|
||||||
0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3,
|
|
||||||
0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d,
|
|
||||||
0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49,
|
|
||||||
0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91,
|
|
||||||
0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d,
|
|
||||||
0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7,
|
|
||||||
0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf,
|
|
||||||
0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73,
|
|
||||||
0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1,
|
|
||||||
0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31,
|
|
||||||
0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95,
|
|
||||||
0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d,
|
|
||||||
0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90,
|
|
||||||
0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d,
|
|
||||||
0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d,
|
|
||||||
0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89,
|
|
||||||
0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44,
|
|
||||||
0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e,
|
|
||||||
0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae,
|
|
||||||
0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59,
|
|
||||||
0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94,
|
|
||||||
0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00,
|
|
||||||
0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf,
|
|
||||||
0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConn
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion4
|
|
||||||
|
|
||||||
// TraceServiceClient is the client API for TraceService service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type TraceServiceClient interface {
|
|
||||||
// After initialization, this RPC must be kept alive for the entire life of
|
|
||||||
// the application. The agent pushes configs down to applications via a
|
|
||||||
// stream.
|
|
||||||
Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
|
|
||||||
// For performance reasons, it is recommended to keep this RPC
|
|
||||||
// alive for the entire life of the application.
|
|
||||||
Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type traceServiceClient struct {
|
|
||||||
cc *grpc.ClientConn
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
|
|
||||||
return &traceServiceClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
|
|
||||||
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
x := &traceServiceConfigClient{stream}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceService_ConfigClient interface {
|
|
||||||
Send(*CurrentLibraryConfig) error
|
|
||||||
Recv() (*UpdatedLibraryConfig, error)
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type traceServiceConfigClient struct {
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
|
|
||||||
return x.ClientStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
|
|
||||||
m := new(UpdatedLibraryConfig)
|
|
||||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
|
|
||||||
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
x := &traceServiceExportClient{stream}
|
|
||||||
return x, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceService_ExportClient interface {
|
|
||||||
Send(*ExportTraceServiceRequest) error
|
|
||||||
Recv() (*ExportTraceServiceResponse, error)
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type traceServiceExportClient struct {
|
|
||||||
grpc.ClientStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
|
|
||||||
return x.ClientStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
|
|
||||||
m := new(ExportTraceServiceResponse)
|
|
||||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TraceServiceServer is the server API for TraceService service.
|
|
||||||
type TraceServiceServer interface {
|
|
||||||
// After initialization, this RPC must be kept alive for the entire life of
|
|
||||||
// the application. The agent pushes configs down to applications via a
|
|
||||||
// stream.
|
|
||||||
Config(TraceService_ConfigServer) error
|
|
||||||
// For performance reasons, it is recommended to keep this RPC
|
|
||||||
// alive for the entire life of the application.
|
|
||||||
Export(TraceService_ExportServer) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
|
|
||||||
s.RegisterService(&_TraceService_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
|
|
||||||
return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceService_ConfigServer interface {
|
|
||||||
Send(*UpdatedLibraryConfig) error
|
|
||||||
Recv() (*CurrentLibraryConfig, error)
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type traceServiceConfigServer struct {
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
|
|
||||||
return x.ServerStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
|
|
||||||
m := new(CurrentLibraryConfig)
|
|
||||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
|
|
||||||
return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceService_ExportServer interface {
|
|
||||||
Send(*ExportTraceServiceResponse) error
|
|
||||||
Recv() (*ExportTraceServiceRequest, error)
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
type traceServiceExportServer struct {
|
|
||||||
grpc.ServerStream
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
|
|
||||||
return x.ServerStream.SendMsg(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
|
|
||||||
m := new(ExportTraceServiceRequest)
|
|
||||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _TraceService_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
|
|
||||||
HandlerType: (*TraceServiceServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{},
|
|
||||||
Streams: []grpc.StreamDesc{
|
|
||||||
{
|
|
||||||
StreamName: "Config",
|
|
||||||
Handler: _TraceService_Config_Handler,
|
|
||||||
ServerStreams: true,
|
|
||||||
ClientStreams: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
StreamName: "Export",
|
|
||||||
Handler: _TraceService_Export_Handler,
|
|
||||||
ServerStreams: true,
|
|
||||||
ClientStreams: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
|
|
||||||
}
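
The service descriptor above wires both RPCs as bidirectional streams. As an illustration only (the agent address, helper name, and request contents are placeholders, not something this file prescribes), a caller of the generated client might open the Export stream like this:

// exportOnce is a hypothetical caller of the generated TraceService client.
func exportOnce(ctx context.Context, addr string, req *ExportTraceServiceRequest) error {
	conn, err := grpc.Dial(addr, grpc.WithInsecure()) // placeholder address and credentials
	if err != nil {
		return err
	}
	defer conn.Close()

	client := NewTraceServiceClient(conn)
	stream, err := client.Export(ctx) // kept alive for the process lifetime in practice
	if err != nil {
		return err
	}
	if err := stream.Send(req); err != nil {
		return err
	}
	return stream.CloseSend()
}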
|
|
|
@ -1,154 +0,0 @@
|
||||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
|
||||||
// source: opencensus/proto/agent/trace/v1/trace_service.proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package v1 is a reverse proxy.
|
|
||||||
|
|
||||||
It translates gRPC into RESTful JSON APIs.
|
|
||||||
*/
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
|
||||||
"golang.org/x/net/context"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ codes.Code
|
|
||||||
var _ io.Reader
|
|
||||||
var _ status.Status
|
|
||||||
var _ = runtime.String
|
|
||||||
var _ = utilities.NewDoubleArray
|
|
||||||
|
|
||||||
func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
|
|
||||||
var metadata runtime.ServerMetadata
|
|
||||||
stream, err := client.Export(ctx)
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Infof("Failed to start streaming: %v", err)
|
|
||||||
return nil, metadata, err
|
|
||||||
}
|
|
||||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
|
||||||
if berr != nil {
|
|
||||||
return nil, metadata, berr
|
|
||||||
}
|
|
||||||
dec := marshaler.NewDecoder(newReader())
|
|
||||||
handleSend := func() error {
|
|
||||||
var protoReq ExportTraceServiceRequest
|
|
||||||
err := dec.Decode(&protoReq)
|
|
||||||
if err == io.EOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Infof("Failed to decode request: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := stream.Send(&protoReq); err != nil {
|
|
||||||
grpclog.Infof("Failed to send request: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := handleSend(); err != nil {
|
|
||||||
if cerr := stream.CloseSend(); cerr != nil {
|
|
||||||
grpclog.Infof("Failed to terminate client stream: %v", cerr)
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
return stream, metadata, nil
|
|
||||||
}
|
|
||||||
return nil, metadata, err
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
if err := handleSend(); err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := stream.CloseSend(); err != nil {
|
|
||||||
grpclog.Infof("Failed to terminate client stream: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
header, err := stream.Header()
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Infof("Failed to get header from client: %v", err)
|
|
||||||
return nil, metadata, err
|
|
||||||
}
|
|
||||||
metadata.HeaderMD = header
|
|
||||||
return stream, metadata, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but
|
|
||||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
|
||||||
func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
|
||||||
conn, err := grpc.Dial(endpoint, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
if cerr := conn.Close(); cerr != nil {
|
|
||||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
<-ctx.Done()
|
|
||||||
if cerr := conn.Close(); cerr != nil {
|
|
||||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return RegisterTraceServiceHandler(ctx, mux, conn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
|
|
||||||
// The handlers forward requests to the grpc endpoint over "conn".
|
|
||||||
func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
|
||||||
return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
|
|
||||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
|
|
||||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
|
|
||||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
|
||||||
// "TraceServiceClient" to call the correct interceptors.
|
|
||||||
func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
|
|
||||||
|
|
||||||
mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
|
||||||
ctx, cancel := context.WithCancel(req.Context())
|
|
||||||
defer cancel()
|
|
||||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
|
||||||
rctx, err := runtime.AnnotateContext(ctx, mux, req)
|
|
||||||
if err != nil {
|
|
||||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
|
|
||||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
|
||||||
if err != nil {
|
|
||||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
|
|
||||||
|
|
||||||
})
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
forward_TraceService_Export_0 = runtime.ForwardResponseStream
|
|
||||||
)
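
Taken together, the handler registration above turns a POST to the /v1/trace pattern into messages on the gRPC Export stream. A hedged sketch of mounting it, with placeholder listen and agent addresses:

// serveGateway is illustrative only; addresses and options are placeholders.
func serveGateway(ctx context.Context) error {
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// Forwards POST /v1/trace (see pattern_TraceService_Export_0 above) to the gRPC endpoint.
	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55678", opts); err != nil {
		return err
	}
	return http.ListenAndServe(":8080", mux)
}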
|
|
1126 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go (generated, vendored)
File diff suppressed because it is too large
|
@ -1,99 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: opencensus/proto/resource/v1/resource.proto

package v1

import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Resource information.
type Resource struct {
	// Type identifier for the resource.
	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
	// Set of labels that describe the resource.
	Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Resource) Reset()         { *m = Resource{} }
func (m *Resource) String() string { return proto.CompactTextString(m) }
func (*Resource) ProtoMessage()    {}
func (*Resource) Descriptor() ([]byte, []int) {
	return fileDescriptor_584700775a2fc762, []int{0}
}

func (m *Resource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Resource.Unmarshal(m, b)
}
func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
}
func (m *Resource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Resource.Merge(m, src)
}
func (m *Resource) XXX_Size() int {
	return xxx_messageInfo_Resource.Size(m)
}
func (m *Resource) XXX_DiscardUnknown() {
	xxx_messageInfo_Resource.DiscardUnknown(m)
}

var xxx_messageInfo_Resource proto.InternalMessageInfo

func (m *Resource) GetType() string {
	if m != nil {
		return m.Type
	}
	return ""
}

func (m *Resource) GetLabels() map[string]string {
	if m != nil {
		return m.Labels
	}
	return nil
}

func init() {
	proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
	proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
}

func init() {
	proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
}
|
|
||||||
|
|
||||||
var fileDescriptor_584700775a2fc762 = []byte{
|
|
||||||
// 234 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
|
|
||||||
0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
|
|
||||||
0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
|
|
||||||
0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
|
|
||||||
0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
|
|
||||||
0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
|
|
||||||
0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
|
|
||||||
0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
|
|
||||||
0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
|
|
||||||
0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
|
|
||||||
0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
|
|
||||||
0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
|
|
||||||
0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
|
|
||||||
0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
|
|
||||||
0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
|
|
||||||
}
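
For illustration, a Resource from this file is just a type identifier plus a label map; a hypothetical constructor might look like this (values are placeholders):

// exampleResource is illustrative only; the type and labels are placeholders.
func exampleResource() *Resource {
	return &Resource{
		Type: "container",
		Labels: map[string]string{
			"host.name": "example-host",
		},
	}
}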
|
|
1543 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go (generated, vendored)
File diff suppressed because it is too large
358 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go (generated, vendored)
|
@ -1,358 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: opencensus/proto/trace/v1/trace_config.proto
|
|
||||||
|
|
||||||
package v1
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// How spans should be sampled:
|
|
||||||
// - Always off
|
|
||||||
// - Always on
|
|
||||||
// - Always follow the parent Span's decision (off if no parent).
|
|
||||||
type ConstantSampler_ConstantDecision int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
|
|
||||||
ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
|
|
||||||
ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
|
|
||||||
)
|
|
||||||
|
|
||||||
var ConstantSampler_ConstantDecision_name = map[int32]string{
|
|
||||||
0: "ALWAYS_OFF",
|
|
||||||
1: "ALWAYS_ON",
|
|
||||||
2: "ALWAYS_PARENT",
|
|
||||||
}
|
|
||||||
|
|
||||||
var ConstantSampler_ConstantDecision_value = map[string]int32{
|
|
||||||
"ALWAYS_OFF": 0,
|
|
||||||
"ALWAYS_ON": 1,
|
|
||||||
"ALWAYS_PARENT": 2,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x ConstantSampler_ConstantDecision) String() string {
|
|
||||||
return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5359209b41ff50c5, []int{2, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Global configuration of the trace service. All fields must be specified, or
|
|
||||||
// the default (zero) values will be used for each type.
|
|
||||||
type TraceConfig struct {
|
|
||||||
// The global default sampler used to make decisions on span sampling.
|
|
||||||
//
|
|
||||||
// Types that are valid to be assigned to Sampler:
|
|
||||||
// *TraceConfig_ProbabilitySampler
|
|
||||||
// *TraceConfig_ConstantSampler
|
|
||||||
// *TraceConfig_RateLimitingSampler
|
|
||||||
Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
|
|
||||||
// The global default max number of attributes per span.
|
|
||||||
MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
|
|
||||||
// The global default max number of annotation events per span.
|
|
||||||
MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
|
|
||||||
// The global default max number of message events per span.
|
|
||||||
MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
|
|
||||||
// The global default max number of link entries per span.
|
|
||||||
MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) Reset() { *m = TraceConfig{} }
|
|
||||||
func (m *TraceConfig) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*TraceConfig) ProtoMessage() {}
|
|
||||||
func (*TraceConfig) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5359209b41ff50c5, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_TraceConfig.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *TraceConfig) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_TraceConfig.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *TraceConfig) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_TraceConfig.Size(m)
|
|
||||||
}
|
|
||||||
func (m *TraceConfig) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_TraceConfig.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_TraceConfig proto.InternalMessageInfo
|
|
||||||
|
|
||||||
type isTraceConfig_Sampler interface {
|
|
||||||
isTraceConfig_Sampler()
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceConfig_ProbabilitySampler struct {
|
|
||||||
ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceConfig_ConstantSampler struct {
|
|
||||||
ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TraceConfig_RateLimitingSampler struct {
|
|
||||||
RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
|
|
||||||
|
|
||||||
func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
|
|
||||||
|
|
||||||
func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
|
|
||||||
if m != nil {
|
|
||||||
return m.Sampler
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
|
|
||||||
if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
|
|
||||||
return x.ProbabilitySampler
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetConstantSampler() *ConstantSampler {
|
|
||||||
if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok {
|
|
||||||
return x.ConstantSampler
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
|
|
||||||
if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
|
|
||||||
return x.RateLimitingSampler
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetMaxNumberOfAttributes() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.MaxNumberOfAttributes
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.MaxNumberOfAnnotations
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.MaxNumberOfMessageEvents
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.MaxNumberOfLinks
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
|
||||||
func (*TraceConfig) XXX_OneofWrappers() []interface{} {
|
|
||||||
return []interface{}{
|
|
||||||
(*TraceConfig_ProbabilitySampler)(nil),
|
|
||||||
(*TraceConfig_ConstantSampler)(nil),
|
|
||||||
(*TraceConfig_RateLimitingSampler)(nil),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sampler that tries to uniformly sample traces with a given probability.
|
|
||||||
// The probability of sampling a trace is equal to that of the specified probability.
|
|
||||||
type ProbabilitySampler struct {
|
|
||||||
// The desired probability of sampling. Must be within [0.0, 1.0].
|
|
||||||
SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} }
|
|
||||||
func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ProbabilitySampler) ProtoMessage() {}
|
|
||||||
func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5359209b41ff50c5, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ProbabilitySampler) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ProbabilitySampler.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ProbabilitySampler) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ProbabilitySampler.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ProbabilitySampler) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ProbabilitySampler) GetSamplingProbability() float64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.SamplingProbability
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sampler that always makes a constant decision on span sampling.
|
|
||||||
type ConstantSampler struct {
|
|
||||||
Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
|
|
||||||
func (m *ConstantSampler) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ConstantSampler) ProtoMessage() {}
|
|
||||||
func (*ConstantSampler) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5359209b41ff50c5, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ConstantSampler) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ConstantSampler.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ConstantSampler) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ConstantSampler.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ConstantSampler) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ConstantSampler.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ConstantSampler) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ConstantSampler.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
|
|
||||||
if m != nil {
|
|
||||||
return m.Decision
|
|
||||||
}
|
|
||||||
return ConstantSampler_ALWAYS_OFF
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sampler that tries to sample with a rate per time window.
|
|
||||||
type RateLimitingSampler struct {
|
|
||||||
// Rate per second.
|
|
||||||
Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} }
|
|
||||||
func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*RateLimitingSampler) ProtoMessage() {}
|
|
||||||
func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5359209b41ff50c5, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *RateLimitingSampler) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_RateLimitingSampler.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *RateLimitingSampler) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_RateLimitingSampler.Size(m)
|
|
||||||
}
|
|
||||||
func (m *RateLimitingSampler) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *RateLimitingSampler) GetQps() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Qps
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value)
|
|
||||||
proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
|
|
||||||
proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
|
|
||||||
proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
|
|
||||||
proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_5359209b41ff50c5 = []byte{
|
|
||||||
// 486 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40,
|
|
||||||
0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa,
|
|
||||||
0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4,
|
|
||||||
0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4,
|
|
||||||
0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33,
|
|
||||||
0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98,
|
|
||||||
0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52,
|
|
||||||
0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9,
|
|
||||||
0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc,
|
|
||||||
0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d,
|
|
||||||
0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d,
|
|
||||||
0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90,
|
|
||||||
0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e,
|
|
||||||
0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85,
|
|
||||||
0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71,
|
|
||||||
0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00,
|
|
||||||
0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71,
|
|
||||||
0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37,
|
|
||||||
0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8,
|
|
||||||
0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e,
|
|
||||||
0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04,
|
|
||||||
0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4,
|
|
||||||
0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39,
|
|
||||||
0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe,
|
|
||||||
0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d,
|
|
||||||
0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49,
|
|
||||||
0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd,
|
|
||||||
0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49,
|
|
||||||
0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30,
|
|
||||||
0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2,
|
|
||||||
0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00,
|
|
||||||
}
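
The Sampler field above is a oneof, so exactly one wrapper type is assigned to it and the matching getter returns non-nil. An illustrative construction, with placeholder limits rather than recommended values:

// exampleTraceConfig is illustrative only; limits and probability are placeholders.
func exampleTraceConfig() *TraceConfig {
	return &TraceConfig{
		Sampler: &TraceConfig_ProbabilitySampler{
			ProbabilitySampler: &ProbabilitySampler{SamplingProbability: 0.25},
		},
		MaxNumberOfAttributes:  32,
		MaxNumberOfAnnotations: 32,
	}
	// cfg.GetProbabilitySampler() is non-nil only because that oneof arm was set;
	// GetConstantSampler() and GetRateLimitingSampler() would return nil here.
}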
|
|
1271 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go (generated, vendored)
File diff suppressed because it is too large
2887 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored)
File diff suppressed because it is too large
883 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto (generated, vendored)
|
@ -1,883 +0,0 @@
|
||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Author: kenton@google.com (Kenton Varda)
|
|
||||||
// Based on original Protocol Buffers design by
|
|
||||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
|
||||||
//
|
|
||||||
// The messages in this file describe the definitions found in .proto files.
|
|
||||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
|
||||||
// without any other information (e.g. without reading its imports).
|
|
||||||
|
|
||||||
|
|
||||||
syntax = "proto2";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "DescriptorProtos";
|
|
||||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
|
|
||||||
// descriptor.proto must be optimized for speed because reflection-based
|
|
||||||
// algorithms don't work during bootstrapping.
|
|
||||||
option optimize_for = SPEED;
|
|
||||||
|
|
||||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
|
||||||
// files it parses.
|
|
||||||
message FileDescriptorSet {
|
|
||||||
repeated FileDescriptorProto file = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a complete .proto file.
|
|
||||||
message FileDescriptorProto {
|
|
||||||
optional string name = 1; // file name, relative to root of source tree
|
|
||||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
|
||||||
|
|
||||||
// Names of files imported by this file.
|
|
||||||
repeated string dependency = 3;
|
|
||||||
// Indexes of the public imported files in the dependency list above.
|
|
||||||
repeated int32 public_dependency = 10;
|
|
||||||
// Indexes of the weak imported files in the dependency list.
|
|
||||||
// For Google-internal migration only. Do not use.
|
|
||||||
repeated int32 weak_dependency = 11;
|
|
||||||
|
|
||||||
// All top-level definitions in this file.
|
|
||||||
repeated DescriptorProto message_type = 4;
|
|
||||||
repeated EnumDescriptorProto enum_type = 5;
|
|
||||||
repeated ServiceDescriptorProto service = 6;
|
|
||||||
repeated FieldDescriptorProto extension = 7;
|
|
||||||
|
|
||||||
optional FileOptions options = 8;
|
|
||||||
|
|
||||||
// This field contains optional information about the original source code.
|
|
||||||
// You may safely remove this entire field without harming runtime
|
|
||||||
// functionality of the descriptors -- the information is needed only by
|
|
||||||
// development tools.
|
|
||||||
optional SourceCodeInfo source_code_info = 9;
|
|
||||||
|
|
||||||
// The syntax of the proto file.
|
|
||||||
// The supported values are "proto2" and "proto3".
|
|
||||||
optional string syntax = 12;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes a message type.
|
|
||||||
message DescriptorProto {
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
repeated FieldDescriptorProto field = 2;
|
|
||||||
repeated FieldDescriptorProto extension = 6;
|
|
||||||
|
|
||||||
repeated DescriptorProto nested_type = 3;
|
|
||||||
repeated EnumDescriptorProto enum_type = 4;
|
|
||||||
|
|
||||||
message ExtensionRange {
|
|
||||||
optional int32 start = 1;
|
|
||||||
optional int32 end = 2;
|
|
||||||
|
|
||||||
optional ExtensionRangeOptions options = 3;
|
|
||||||
}
|
|
||||||
repeated ExtensionRange extension_range = 5;
|
|
||||||
|
|
||||||
repeated OneofDescriptorProto oneof_decl = 8;
|
|
||||||
|
|
||||||
optional MessageOptions options = 7;
|
|
||||||
|
|
||||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
|
||||||
// fields or extension ranges in the same message. Reserved ranges may
|
|
||||||
// not overlap.
|
|
||||||
message ReservedRange {
|
|
||||||
optional int32 start = 1; // Inclusive.
|
|
||||||
optional int32 end = 2; // Exclusive.
|
|
||||||
}
|
|
||||||
repeated ReservedRange reserved_range = 9;
|
|
||||||
// Reserved field names, which may not be used by fields in the same message.
|
|
||||||
// A given name may only be reserved once.
|
|
||||||
repeated string reserved_name = 10;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ExtensionRangeOptions {
|
|
||||||
// The parser stores options it doesn't recognize here. See above.
|
|
||||||
  repeated UninterpretedOption uninterpreted_option = 999;

  // Clients can define custom options in extensions of this message. See above.
  extensions 1000 to max;
}

// Describes a field within a message.
message FieldDescriptorProto {
  enum Type {
    // 0 is reserved for errors.
    // Order is weird for historical reasons.
    TYPE_DOUBLE = 1;
    TYPE_FLOAT = 2;
    // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
    // negative values are likely.
    TYPE_INT64 = 3;
    TYPE_UINT64 = 4;
    // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
    // negative values are likely.
    TYPE_INT32 = 5;
    TYPE_FIXED64 = 6;
    TYPE_FIXED32 = 7;
    TYPE_BOOL = 8;
    TYPE_STRING = 9;
    // Tag-delimited aggregate.
    // Group type is deprecated and not supported in proto3. However, Proto3
    // implementations should still be able to parse the group wire format and
    // treat group fields as unknown fields.
    TYPE_GROUP = 10;
    TYPE_MESSAGE = 11;  // Length-delimited aggregate.

    // New in version 2.
    TYPE_BYTES = 12;
    TYPE_UINT32 = 13;
    TYPE_ENUM = 14;
    TYPE_SFIXED32 = 15;
    TYPE_SFIXED64 = 16;
    TYPE_SINT32 = 17;  // Uses ZigZag encoding.
    TYPE_SINT64 = 18;  // Uses ZigZag encoding.
  };

  enum Label {
    // 0 is reserved for errors
    LABEL_OPTIONAL = 1;
    LABEL_REQUIRED = 2;
    LABEL_REPEATED = 3;
  };

  optional string name = 1;
  optional int32 number = 3;
  optional Label label = 4;

  // If type_name is set, this need not be set. If both this and type_name
  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
  optional Type type = 5;

  // For message and enum types, this is the name of the type. If the name
  // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
  // rules are used to find the type (i.e. first the nested types within this
  // message are searched, then within the parent, on up to the root
  // namespace).
  optional string type_name = 6;

  // For extensions, this is the name of the type being extended. It is
  // resolved in the same manner as type_name.
  optional string extendee = 2;

  // For numeric types, contains the original text representation of the value.
  // For booleans, "true" or "false".
  // For strings, contains the default text contents (not escaped in any way).
  // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
  // TODO(kenton): Base-64 encode?
  optional string default_value = 7;

  // If set, gives the index of a oneof in the containing type's oneof_decl
  // list. This field is a member of that oneof.
  optional int32 oneof_index = 9;

  // JSON name of this field. The value is set by protocol compiler. If the
  // user has set a "json_name" option on this field, that option's value
  // will be used. Otherwise, it's deduced from the field's name by converting
  // it to camelCase.
  optional string json_name = 10;

  optional FieldOptions options = 8;
}

// Describes a oneof.
message OneofDescriptorProto {
  optional string name = 1;
  optional OneofOptions options = 2;
}

// Describes an enum type.
message EnumDescriptorProto {
  optional string name = 1;

  repeated EnumValueDescriptorProto value = 2;

  optional EnumOptions options = 3;

  // Range of reserved numeric values. Reserved values may not be used by
  // entries in the same enum. Reserved ranges may not overlap.
  //
  // Note that this is distinct from DescriptorProto.ReservedRange in that it
  // is inclusive such that it can appropriately represent the entire int32
  // domain.
  message EnumReservedRange {
    optional int32 start = 1;  // Inclusive.
    optional int32 end = 2;    // Inclusive.
  }

  // Range of reserved numeric values. Reserved numeric values may not be used
  // by enum values in the same enum declaration. Reserved ranges may not
  // overlap.
  repeated EnumReservedRange reserved_range = 4;

  // Reserved enum value names, which may not be reused. A given name may only
  // be reserved once.
  repeated string reserved_name = 5;
}

// Describes a value within an enum.
message EnumValueDescriptorProto {
  optional string name = 1;
  optional int32 number = 2;

  optional EnumValueOptions options = 3;
}

// Describes a service.
message ServiceDescriptorProto {
  optional string name = 1;
  repeated MethodDescriptorProto method = 2;

  optional ServiceOptions options = 3;
}

// Describes a method of a service.
message MethodDescriptorProto {
  optional string name = 1;

  // Input and output type names. These are resolved in the same way as
  // FieldDescriptorProto.type_name, but must refer to a message type.
  optional string input_type = 2;
  optional string output_type = 3;

  optional MethodOptions options = 4;

  // Identifies if client streams multiple client messages
  optional bool client_streaming = 5 [default=false];
  // Identifies if server streams multiple server messages
  optional bool server_streaming = 6 [default=false];
}

// ===================================================================
// Options

// Each of the definitions above may have "options" attached. These are
// just annotations which may cause code to be generated slightly differently
// or may contain hints for code that manipulates protocol messages.
//
// Clients may define custom options as extensions of the *Options messages.
// These extensions may not yet be known at parsing time, so the parser cannot
// store the values in them. Instead it stores them in a field in the *Options
// message called uninterpreted_option. This field must have the same name
// across all *Options messages. We then use this field to populate the
// extensions when we build a descriptor, at which point all protos have been
// parsed and so all extensions are known.
//
// Extension numbers for custom options may be chosen as follows:
// * For options which will only be used within a single application or
//   organization, or for experimental options, use field numbers 50000
//   through 99999. It is up to you to ensure that you do not use the
//   same number for multiple options.
// * For options which will be published and used publicly by multiple
//   independent entities, e-mail protobuf-global-extension-registry@google.com
//   to reserve extension numbers. Simply provide your project name (e.g.
//   Objective-C plugin) and your project website (if available) -- there's no
//   need to explain how you intend to use them. Usually you only need one
//   extension number. You can declare multiple options with only one extension
//   number by putting them in a sub-message. See the Custom Options section of
//   the docs for examples:
//   https://developers.google.com/protocol-buffers/docs/proto#options
//   If this turns out to be popular, a web service will be set up
//   to automatically assign option numbers.

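In Go, reading such a custom option back off a descriptor goes through the extension API of the proto package vendored alongside this file. The sketch below is purely illustrative and not part of the vendored sources: the mypb package and its generated E_AuthRequired extension descriptor are hypothetical stand-ins for code generated from a .proto that declares an extension of google.protobuf.MethodOptions (e.g. "optional bool auth_required = 50001;").

// Sketch: reading a hypothetical custom method option with the golang/protobuf
// v1 extension API. mypb and E_AuthRequired are assumed generated identifiers.
package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"

    mypb "example.com/mypb" // hypothetical package generated from the extending .proto
)

// authRequired reports whether the custom (mypb.auth_required) option is set
// to true on the given method descriptor.
func authRequired(m *descriptor.MethodDescriptorProto) bool {
    opts := m.GetOptions()
    if opts == nil || !proto.HasExtension(opts, mypb.E_AuthRequired) {
        return false
    }
    v, err := proto.GetExtension(opts, mypb.E_AuthRequired)
    if err != nil {
        return false
    }
    b, ok := v.(*bool) // proto2 optional bool comes back as *bool
    return ok && b != nil && *b
}

func main() {
    fmt.Println(authRequired(&descriptor.MethodDescriptorProto{}))
}
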
// The remaining definitions in the file are the options and source-info
// messages:
//
//   FileOptions        -- per-file code-generation switches: java_package,
//                         java_outer_classname, java_multiple_files,
//                         java_string_check_utf8, optimize_for (SPEED,
//                         CODE_SIZE, LITE_RUNTIME), go_package, the
//                         cc/java/py/php_generic_services flags, deprecated,
//                         cc_enable_arenas, objc_class_prefix,
//                         csharp_namespace, swift_prefix, php_class_prefix,
//                         php_namespace, php_metadata_namespace, ruby_package.
//   MessageOptions     -- message_set_wire_format,
//                         no_standard_descriptor_accessor, deprecated,
//                         map_entry.
//   FieldOptions       -- ctype (STRING, CORD, STRING_PIECE), packed, jstype
//                         (JS_NORMAL, JS_STRING, JS_NUMBER), lazy, deprecated,
//                         weak.
//   OneofOptions, EnumOptions (allow_alias, deprecated), EnumValueOptions,
//   ServiceOptions and MethodOptions (deprecated, idempotency_level).
//
// Every *Options message also carries
//   repeated UninterpretedOption uninterpreted_option = 999;
//   extensions 1000 to max;
// so that custom options can be attached as described above.
//
// UninterpretedOption holds options the parser could not resolve yet.
// SourceCodeInfo.Location identifies each element by a path of field numbers
// and indices into the FileDescriptorProto (for example [4, 3, 2, 7, 1] is
// file.message_type(3).field(7).name()), together with a zero-based
// line/column span and any leading, trailing or detached comments.
// GeneratedCodeInfo.Annotation maps byte ranges of generated code back to the
// .proto elements they were generated from.
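To make the SourceCodeInfo path convention summarized above concrete, here is a small stand-alone sketch (my own illustration, not part of the vendored file) that follows the file's own worked example: the path [4, 3, 2, 7, 1] means message_type index 3, field index 7, then that field's name.

// Sketch: resolving the SourceCodeInfo path [4, m, 2, f, 1] by hand against a
// FileDescriptorProto, using the vendored descriptor package.
package main

import (
    "fmt"

    descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// fieldNameAt returns the name of field index f inside message index m of the
// given file descriptor: field number 4 (message_type), then 2 (field), then 1 (name).
func fieldNameAt(fd *descriptor.FileDescriptorProto, m, f int) (string, bool) {
    msgs := fd.GetMessageType() // FileDescriptorProto field 4
    if m < 0 || m >= len(msgs) {
        return "", false
    }
    fields := msgs[m].GetField() // DescriptorProto field 2
    if f < 0 || f >= len(fields) {
        return "", false
    }
    return fields[f].GetName(), true // FieldDescriptorProto field 1
}

func main() {
    // Normally the descriptor comes from a CodeGeneratorRequest; an empty one
    // simply yields "not found" here.
    fd := &descriptor.FileDescriptorProto{}
    if name, ok := fieldNameAt(fd, 3, 7); ok {
        fmt.Println("path [4, 3, 2, 7, 1] resolves to field name:", name)
    } else {
        fmt.Println("no such element in this (empty) descriptor")
    }
}
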
2806 vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go generated vendored
File diff suppressed because it is too large
117 vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go generated vendored
@@ -1,117 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//   * Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above
//     copyright notice, this list of conditions and the following disclaimer
//     in the documentation and/or other materials provided with the
//     distribution.
//   * Neither the name of Google Inc. nor the names of its
//     contributors may be used to endorse or promote products derived from
//     this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package remap handles tracking the locations of Go tokens in a source text
across a rewrite by the Go formatter.
*/
package remap

import (
    "fmt"
    "go/scanner"
    "go/token"
)

// A Location represents a span of byte offsets in the source text.
type Location struct {
    Pos, End int // End is exclusive
}

// A Map represents a mapping between token locations in an input source text
// and locations in the corresponding output text.
type Map map[Location]Location

// Find reports whether the specified span is recorded by m, and if so returns
// the new location it was mapped to. If the input span was not found, the
// returned location is the same as the input.
func (m Map) Find(pos, end int) (Location, bool) {
    key := Location{
        Pos: pos,
        End: end,
    }
    if loc, ok := m[key]; ok {
        return loc, true
    }
    return key, false
}

func (m Map) add(opos, oend, npos, nend int) {
    m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
}

// Compute constructs a location mapping from input to output. An error is
// reported if any of the tokens of output cannot be mapped.
func Compute(input, output []byte) (Map, error) {
    itok := tokenize(input)
    otok := tokenize(output)
    if len(itok) != len(otok) {
        return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
    }
    m := make(Map)
    for i, ti := range itok {
        to := otok[i]
        if ti.Token != to.Token {
            return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
        }
        m.add(ti.pos, ti.end, to.pos, to.end)
    }
    return m, nil
}

// tokinfo records the span and type of a source token.
type tokinfo struct {
    pos, end int
    token.Token
}

func tokenize(src []byte) []tokinfo {
    fs := token.NewFileSet()
    var s scanner.Scanner
    s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
    var info []tokinfo
    for {
        pos, next, lit := s.Scan()
        switch next {
        case token.SEMICOLON:
            continue
        }
        info = append(info, tokinfo{
            pos:   int(pos - 1),
            end:   int(pos + token.Pos(len(lit)) - 1),
            Token: next,
        })
        if next == token.EOF {
            break
        }
    }
    return info
}

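remap is only consumed by protoc-gen-go's generator, but its API is small enough to exercise directly. The following sketch is my own illustration rather than vendored code; note that the real package lives under internal/, so outside protoc-gen-go you would need your own copy of it for this import to be allowed by Go's internal-package rule.

// Sketch: map a token's byte span in unformatted Go source to its span in the
// gofmt-formatted output, using Compute and Find from the package above.
package main

import (
    "fmt"
    "go/format"
    "log"

    // In practice this path is import-restricted (internal/); assume a local copy.
    "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
)

func main() {
    input := []byte("package   demo\nvar   Answer=42\n")

    // Rewrite the source with the Go formatter; token positions shift.
    output, err := format.Source(input)
    if err != nil {
        log.Fatal(err)
    }

    m, err := remap.Compute(input, output)
    if err != nil {
        log.Fatal(err)
    }

    // "Answer" occupies bytes [21,27) of the input string above.
    if loc, ok := m.Find(21, 27); ok {
        fmt.Printf("token moved to [%d,%d): %q\n", loc.Pos, loc.End, output[loc.Pos:loc.End])
    }
}
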
369 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go generated vendored
@@ -1,369 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: google/protobuf/compiler/plugin.proto
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package plugin_go is a generated protocol buffer package.
|
|
||||||
|
|
||||||
It is generated from these files:
|
|
||||||
google/protobuf/compiler/plugin.proto
|
|
||||||
|
|
||||||
It has these top-level messages:
|
|
||||||
Version
|
|
||||||
CodeGeneratorRequest
|
|
||||||
CodeGeneratorResponse
|
|
||||||
*/
|
|
||||||
package plugin_go
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import fmt "fmt"
|
|
||||||
import math "math"
|
|
||||||
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
// The version number of protocol compiler.
|
|
||||||
type Version struct {
|
|
||||||
Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
|
|
||||||
Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
|
|
||||||
Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
|
|
||||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
|
||||||
// be empty for mainline stable releases.
|
|
||||||
Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Version) Reset() { *m = Version{} }
|
|
||||||
func (m *Version) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Version) ProtoMessage() {}
|
|
||||||
func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
|
||||||
func (m *Version) Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Version.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *Version) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Version.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *Version) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Version.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Version) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Version.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Version proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Version) GetMajor() int32 {
|
|
||||||
if m != nil && m.Major != nil {
|
|
||||||
return *m.Major
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Version) GetMinor() int32 {
|
|
||||||
if m != nil && m.Minor != nil {
|
|
||||||
return *m.Minor
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Version) GetPatch() int32 {
|
|
||||||
if m != nil && m.Patch != nil {
|
|
||||||
return *m.Patch
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Version) GetSuffix() string {
|
|
||||||
if m != nil && m.Suffix != nil {
|
|
||||||
return *m.Suffix
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
|
|
||||||
type CodeGeneratorRequest struct {
|
|
||||||
// The .proto files that were explicitly listed on the command-line. The
|
|
||||||
// code generator should generate code only for these files. Each file's
|
|
||||||
// descriptor will be included in proto_file, below.
|
|
||||||
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
|
|
||||||
// The generator parameter passed on the command-line.
|
|
||||||
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
|
|
||||||
// FileDescriptorProtos for all files in files_to_generate and everything
|
|
||||||
// they import. The files will appear in topological order, so each file
|
|
||||||
// appears before any file that imports it.
|
|
||||||
//
|
|
||||||
// protoc guarantees that all proto_files will be written after
|
|
||||||
// the fields above, even though this is not technically guaranteed by the
|
|
||||||
// protobuf wire format. This theoretically could allow a plugin to stream
|
|
||||||
// in the FileDescriptorProtos and handle them one by one rather than read
|
|
||||||
// the entire set into memory at once. However, as of this writing, this
|
|
||||||
// is not similarly optimized on protoc's end -- it will store all fields in
|
|
||||||
// memory at once before sending them to the plugin.
|
|
||||||
//
|
|
||||||
// Type names of fields and extensions in the FileDescriptorProto are always
|
|
||||||
// fully qualified.
|
|
||||||
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
|
|
||||||
// The version number of protocol compiler.
|
|
||||||
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
|
|
||||||
func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*CodeGeneratorRequest) ProtoMessage() {}
|
|
||||||
func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
|
||||||
func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorRequest) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_CodeGeneratorRequest.Size(m)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
|
|
||||||
if m != nil {
|
|
||||||
return m.FileToGenerate
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorRequest) GetParameter() string {
|
|
||||||
if m != nil && m.Parameter != nil {
|
|
||||||
return *m.Parameter
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
|
|
||||||
if m != nil {
|
|
||||||
return m.ProtoFile
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
|
|
||||||
if m != nil {
|
|
||||||
return m.CompilerVersion
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The plugin writes an encoded CodeGeneratorResponse to stdout.
|
|
||||||
type CodeGeneratorResponse struct {
|
|
||||||
// Error message. If non-empty, code generation failed. The plugin process
|
|
||||||
// should exit with status code zero even if it reports an error in this way.
|
|
||||||
//
|
|
||||||
// This should be used to indicate errors in .proto files which prevent the
|
|
||||||
// code generator from generating correct code. Errors which indicate a
|
|
||||||
// problem in protoc itself -- such as the input CodeGeneratorRequest being
|
|
||||||
// unparseable -- should be reported by writing a message to stderr and
|
|
||||||
// exiting with a non-zero status code.
|
|
||||||
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
|
||||||
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
|
|
||||||
func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*CodeGeneratorResponse) ProtoMessage() {}
|
|
||||||
func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
|
||||||
func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse.Size(m)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse) GetError() string {
|
|
||||||
if m != nil && m.Error != nil {
|
|
||||||
return *m.Error
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
|
|
||||||
if m != nil {
|
|
||||||
return m.File
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Represents a single generated file.
|
|
||||||
type CodeGeneratorResponse_File struct {
|
|
||||||
// The file name, relative to the output directory. The name must not
|
|
||||||
// contain "." or ".." components and must be relative, not be absolute (so,
|
|
||||||
// the file cannot lie outside the output directory). "/" must be used as
|
|
||||||
// the path separator, not "\".
|
|
||||||
//
|
|
||||||
// If the name is omitted, the content will be appended to the previous
|
|
||||||
// file. This allows the generator to break large files into small chunks,
|
|
||||||
// and allows the generated text to be streamed back to protoc so that large
|
|
||||||
// files need not reside completely in memory at one time. Note that as of
|
|
||||||
// this writing protoc does not optimize for this -- it will read the entire
|
|
||||||
// CodeGeneratorResponse before writing files to disk.
|
|
||||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
|
||||||
// If non-empty, indicates that the named file should already exist, and the
|
|
||||||
// content here is to be inserted into that file at a defined insertion
|
|
||||||
// point. This feature allows a code generator to extend the output
|
|
||||||
// produced by another code generator. The original generator may provide
|
|
||||||
// insertion points by placing special annotations in the file that look
|
|
||||||
// like:
|
|
||||||
// @@protoc_insertion_point(NAME)
|
|
||||||
// The annotation can have arbitrary text before and after it on the line,
|
|
||||||
// which allows it to be placed in a comment. NAME should be replaced with
|
|
||||||
// an identifier naming the point -- this is what other generators will use
|
|
||||||
// as the insertion_point. Code inserted at this point will be placed
|
|
||||||
// immediately above the line containing the insertion point (thus multiple
|
|
||||||
// insertions to the same point will come out in the order they were added).
|
|
||||||
// The double-@ is intended to make it unlikely that the generated code
|
|
||||||
// could contain things that look like insertion points by accident.
|
|
||||||
//
|
|
||||||
// For example, the C++ code generator places the following line in the
|
|
||||||
// .pb.h files that it generates:
|
|
||||||
// // @@protoc_insertion_point(namespace_scope)
|
|
||||||
// This line appears within the scope of the file's package namespace, but
|
|
||||||
// outside of any particular class. Another plugin can then specify the
|
|
||||||
// insertion_point "namespace_scope" to generate additional classes or
|
|
||||||
// other declarations that should be placed in this scope.
|
|
||||||
//
|
|
||||||
// Note that if the line containing the insertion point begins with
|
|
||||||
// whitespace, the same whitespace will be added to every line of the
|
|
||||||
// inserted text. This is useful for languages like Python, where
|
|
||||||
// indentation matters. In these languages, the insertion point comment
|
|
||||||
// should be indented the same amount as any inserted code will need to be
|
|
||||||
// in order to work correctly in that context.
|
|
||||||
//
|
|
||||||
// The code generator that generates the initial file and the one which
|
|
||||||
// inserts into it must both run as part of a single invocation of protoc.
|
|
||||||
// Code generators are executed in the order in which they appear on the
|
|
||||||
// command line.
|
|
||||||
//
|
|
||||||
// If |insertion_point| is present, |name| must also be present.
|
|
||||||
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
|
|
||||||
// The file contents.
|
|
||||||
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
|
|
||||||
func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*CodeGeneratorResponse_File) ProtoMessage() {}
|
|
||||||
func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
|
|
||||||
func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse_File) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
|
|
||||||
}
|
|
||||||
func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse_File) GetName() string {
|
|
||||||
if m != nil && m.Name != nil {
|
|
||||||
return *m.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
|
|
||||||
if m != nil && m.InsertionPoint != nil {
|
|
||||||
return *m.InsertionPoint
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *CodeGeneratorResponse_File) GetContent() string {
|
|
||||||
if m != nil && m.Content != nil {
|
|
||||||
return *m.Content
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
|
|
||||||
proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
|
|
||||||
proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
|
|
||||||
proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
|
|
||||||
|
|
||||||
var fileDescriptor0 = []byte{
|
|
||||||
// 417 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
|
|
||||||
0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
|
|
||||||
0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
|
|
||||||
0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
|
|
||||||
0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
|
|
||||||
0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
|
|
||||||
0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
|
|
||||||
0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
|
|
||||||
0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
|
|
||||||
0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
|
|
||||||
0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
|
|
||||||
0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
|
|
||||||
0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
|
|
||||||
0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
|
|
||||||
0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
|
|
||||||
0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
|
|
||||||
0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
|
|
||||||
0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
|
|
||||||
0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
|
|
||||||
0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
|
|
||||||
0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
|
|
||||||
0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
|
|
||||||
0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
|
|
||||||
0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
|
|
||||||
0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
|
|
||||||
0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
|
|
||||||
0x00,
|
|
||||||
}
|
|
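As a point of reference (not part of this diff), the CodeGeneratorResponse_File message above is the unit a plugin emits for each output file. A minimal sketch of how it is typically populated follows; the insertion point name "imports", the file name, and the content string are purely illustrative, and the import paths assume the vendored packages removed in this change.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// Per the field comments above, a file that targets an insertion point
	// must set Name as well as InsertionPoint.
	f := &plugin.CodeGeneratorResponse_File{
		Name:           proto.String("app.pb.go"),
		InsertionPoint: proto.String("imports"),
		Content:        proto.String(`_ "example.com/sideeffect"`),
	}

	// The generated getters return "" for unset optional fields, so callers
	// do not need to nil-check the pointer fields themselves.
	fmt.Println(f.GetName(), f.GetInsertionPoint())
}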
83
vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
generated
vendored
@@ -1,83 +0,0 @@
|
||||||
// Code generated by protoc-gen-go.
|
|
||||||
// source: google/protobuf/compiler/plugin.proto
|
|
||||||
// DO NOT EDIT!
|
|
||||||
|
|
||||||
package google_protobuf_compiler
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import "math"
|
|
||||||
import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
|
||||||
|
|
||||||
// Reference proto and math imports to suppress error if they are not otherwise used.
|
|
||||||
var _ = proto.GetString
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
type CodeGeneratorRequest struct {
|
|
||||||
FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
|
|
||||||
Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
|
|
||||||
ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
|
|
||||||
func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
|
|
||||||
func (*CodeGeneratorRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorRequest) GetParameter() string {
|
|
||||||
if this != nil && this.Parameter != nil {
|
|
||||||
return *this.Parameter
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type CodeGeneratorResponse struct {
|
|
||||||
Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
|
|
||||||
File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
|
|
||||||
func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
|
|
||||||
func (*CodeGeneratorResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse) GetError() string {
|
|
||||||
if this != nil && this.Error != nil {
|
|
||||||
return *this.Error
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type CodeGeneratorResponse_File struct {
|
|
||||||
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
|
||||||
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
|
|
||||||
Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
|
|
||||||
func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
|
|
||||||
func (*CodeGeneratorResponse_File) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse_File) GetName() string {
|
|
||||||
if this != nil && this.Name != nil {
|
|
||||||
return *this.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
|
|
||||||
if this != nil && this.InsertionPoint != nil {
|
|
||||||
return *this.InsertionPoint
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (this *CodeGeneratorResponse_File) GetContent() string {
|
|
||||||
if this != nil && this.Content != nil {
|
|
||||||
return *this.Content
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
167
vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
generated
vendored
@@ -1,167 +0,0 @@
|
||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
// Author: kenton@google.com (Kenton Varda)
|
|
||||||
//
|
|
||||||
// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
|
|
||||||
// change.
|
|
||||||
//
|
|
||||||
// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
|
|
||||||
// just a program that reads a CodeGeneratorRequest from stdin and writes a
|
|
||||||
// CodeGeneratorResponse to stdout.
|
|
||||||
//
|
|
||||||
// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
|
|
||||||
// of dealing with the raw protocol defined here.
|
|
||||||
//
|
|
||||||
// A plugin executable needs only to be placed somewhere in the path. The
|
|
||||||
// plugin should be named "protoc-gen-$NAME", and will then be used when the
|
|
||||||
// flag "--${NAME}_out" is passed to protoc.
|
|
||||||
|
|
||||||
syntax = "proto2";
|
|
||||||
package google.protobuf.compiler;
|
|
||||||
option java_package = "com.google.protobuf.compiler";
|
|
||||||
option java_outer_classname = "PluginProtos";
|
|
||||||
|
|
||||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
|
|
||||||
|
|
||||||
import "google/protobuf/descriptor.proto";
|
|
||||||
|
|
||||||
// The version number of protocol compiler.
|
|
||||||
message Version {
|
|
||||||
optional int32 major = 1;
|
|
||||||
optional int32 minor = 2;
|
|
||||||
optional int32 patch = 3;
|
|
||||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
|
||||||
// be empty for mainline stable releases.
|
|
||||||
optional string suffix = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
// An encoded CodeGeneratorRequest is written to the plugin's stdin.
|
|
||||||
message CodeGeneratorRequest {
|
|
||||||
// The .proto files that were explicitly listed on the command-line. The
|
|
||||||
// code generator should generate code only for these files. Each file's
|
|
||||||
// descriptor will be included in proto_file, below.
|
|
||||||
repeated string file_to_generate = 1;
|
|
||||||
|
|
||||||
// The generator parameter passed on the command-line.
|
|
||||||
optional string parameter = 2;
|
|
||||||
|
|
||||||
// FileDescriptorProtos for all files in files_to_generate and everything
|
|
||||||
// they import. The files will appear in topological order, so each file
|
|
||||||
// appears before any file that imports it.
|
|
||||||
//
|
|
||||||
// protoc guarantees that all proto_files will be written after
|
|
||||||
// the fields above, even though this is not technically guaranteed by the
|
|
||||||
// protobuf wire format. This theoretically could allow a plugin to stream
|
|
||||||
// in the FileDescriptorProtos and handle them one by one rather than read
|
|
||||||
// the entire set into memory at once. However, as of this writing, this
|
|
||||||
// is not similarly optimized on protoc's end -- it will store all fields in
|
|
||||||
// memory at once before sending them to the plugin.
|
|
||||||
//
|
|
||||||
// Type names of fields and extensions in the FileDescriptorProto are always
|
|
||||||
// fully qualified.
|
|
||||||
repeated FileDescriptorProto proto_file = 15;
|
|
||||||
|
|
||||||
// The version number of protocol compiler.
|
|
||||||
optional Version compiler_version = 3;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// The plugin writes an encoded CodeGeneratorResponse to stdout.
|
|
||||||
message CodeGeneratorResponse {
|
|
||||||
// Error message. If non-empty, code generation failed. The plugin process
|
|
||||||
// should exit with status code zero even if it reports an error in this way.
|
|
||||||
//
|
|
||||||
// This should be used to indicate errors in .proto files which prevent the
|
|
||||||
// code generator from generating correct code. Errors which indicate a
|
|
||||||
// problem in protoc itself -- such as the input CodeGeneratorRequest being
|
|
||||||
// unparseable -- should be reported by writing a message to stderr and
|
|
||||||
// exiting with a non-zero status code.
|
|
||||||
optional string error = 1;
|
|
||||||
|
|
||||||
// Represents a single generated file.
|
|
||||||
message File {
|
|
||||||
// The file name, relative to the output directory. The name must not
|
|
||||||
// contain "." or ".." components and must be relative, not be absolute (so,
|
|
||||||
// the file cannot lie outside the output directory). "/" must be used as
|
|
||||||
// the path separator, not "\".
|
|
||||||
//
|
|
||||||
// If the name is omitted, the content will be appended to the previous
|
|
||||||
// file. This allows the generator to break large files into small chunks,
|
|
||||||
// and allows the generated text to be streamed back to protoc so that large
|
|
||||||
// files need not reside completely in memory at one time. Note that as of
|
|
||||||
// this writing protoc does not optimize for this -- it will read the entire
|
|
||||||
// CodeGeneratorResponse before writing files to disk.
|
|
||||||
optional string name = 1;
|
|
||||||
|
|
||||||
// If non-empty, indicates that the named file should already exist, and the
|
|
||||||
// content here is to be inserted into that file at a defined insertion
|
|
||||||
// point. This feature allows a code generator to extend the output
|
|
||||||
// produced by another code generator. The original generator may provide
|
|
||||||
// insertion points by placing special annotations in the file that look
|
|
||||||
// like:
|
|
||||||
// @@protoc_insertion_point(NAME)
|
|
||||||
// The annotation can have arbitrary text before and after it on the line,
|
|
||||||
// which allows it to be placed in a comment. NAME should be replaced with
|
|
||||||
// an identifier naming the point -- this is what other generators will use
|
|
||||||
// as the insertion_point. Code inserted at this point will be placed
|
|
||||||
// immediately above the line containing the insertion point (thus multiple
|
|
||||||
// insertions to the same point will come out in the order they were added).
|
|
||||||
// The double-@ is intended to make it unlikely that the generated code
|
|
||||||
// could contain things that look like insertion points by accident.
|
|
||||||
//
|
|
||||||
// For example, the C++ code generator places the following line in the
|
|
||||||
// .pb.h files that it generates:
|
|
||||||
// // @@protoc_insertion_point(namespace_scope)
|
|
||||||
// This line appears within the scope of the file's package namespace, but
|
|
||||||
// outside of any particular class. Another plugin can then specify the
|
|
||||||
// insertion_point "namespace_scope" to generate additional classes or
|
|
||||||
// other declarations that should be placed in this scope.
|
|
||||||
//
|
|
||||||
// Note that if the line containing the insertion point begins with
|
|
||||||
// whitespace, the same whitespace will be added to every line of the
|
|
||||||
// inserted text. This is useful for languages like Python, where
|
|
||||||
// indentation matters. In these languages, the insertion point comment
|
|
||||||
// should be indented the same amount as any inserted code will need to be
|
|
||||||
// in order to work correctly in that context.
|
|
||||||
//
|
|
||||||
// The code generator that generates the initial file and the one which
|
|
||||||
// inserts into it must both run as part of a single invocation of protoc.
|
|
||||||
// Code generators are executed in the order in which they appear on the
|
|
||||||
// command line.
|
|
||||||
//
|
|
||||||
// If |insertion_point| is present, |name| must also be present.
|
|
||||||
optional string insertion_point = 2;
|
|
||||||
|
|
||||||
// The file contents.
|
|
||||||
optional string content = 15;
|
|
||||||
}
|
|
||||||
repeated File file = 15;
|
|
||||||
}
|
|
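plugin.proto above spells out the whole protoc plugin contract: a serialized CodeGeneratorRequest arrives on stdin and a CodeGeneratorResponse is written to stdout. A rough, illustrative Go sketch of that loop is shown below; it relies on the vendored packages being deleted in this diff, and the ".txt" output naming is an arbitrary example, not anything a real generator does.

package main

import (
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc sends the serialized request on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}

	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		os.Exit(1)
	}

	// Emit one generated file per .proto file listed on the command line.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// The serialized response goes back to protoc on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(out)
}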
336
vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
generated
vendored
@@ -1,336 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: google/protobuf/struct.proto
|
|
||||||
|
|
||||||
package structpb
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
|
||||||
// `Value` type union.
|
|
||||||
//
|
|
||||||
// The JSON representation for `NullValue` is JSON `null`.
|
|
||||||
type NullValue int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Null value.
|
|
||||||
NullValue_NULL_VALUE NullValue = 0
|
|
||||||
)
|
|
||||||
|
|
||||||
var NullValue_name = map[int32]string{
|
|
||||||
0: "NULL_VALUE",
|
|
||||||
}
|
|
||||||
|
|
||||||
var NullValue_value = map[string]int32{
|
|
||||||
"NULL_VALUE": 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x NullValue) String() string {
|
|
||||||
return proto.EnumName(NullValue_name, int32(x))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (NullValue) EnumDescriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_df322afd6c9fb402, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
|
|
||||||
|
|
||||||
// `Struct` represents a structured data value, consisting of fields
|
|
||||||
// which map to dynamically typed values. In some languages, `Struct`
|
|
||||||
// might be supported by a native representation. For example, in
|
|
||||||
// scripting languages like JS a struct is represented as an
|
|
||||||
// object. The details of that representation are described together
|
|
||||||
// with the proto support for the language.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Struct` is JSON object.
|
|
||||||
type Struct struct {
|
|
||||||
// Unordered map of dynamically typed values.
|
|
||||||
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Struct) Reset() { *m = Struct{} }
|
|
||||||
func (m *Struct) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Struct) ProtoMessage() {}
|
|
||||||
func (*Struct) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_df322afd6c9fb402, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Struct) XXX_WellKnownType() string { return "Struct" }
|
|
||||||
|
|
||||||
func (m *Struct) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Struct.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Struct) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Struct.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Struct) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Struct.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Struct) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Struct.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Struct proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Struct) GetFields() map[string]*Value {
|
|
||||||
if m != nil {
|
|
||||||
return m.Fields
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// `Value` represents a dynamically typed value which can be either
|
|
||||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
|
||||||
// list of values. A producer of value is expected to set one of that
|
|
||||||
// variants, absence of any variant indicates an error.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Value` is JSON value.
|
|
||||||
type Value struct {
|
|
||||||
// The kind of value.
|
|
||||||
//
|
|
||||||
// Types that are valid to be assigned to Kind:
|
|
||||||
// *Value_NullValue
|
|
||||||
// *Value_NumberValue
|
|
||||||
// *Value_StringValue
|
|
||||||
// *Value_BoolValue
|
|
||||||
// *Value_StructValue
|
|
||||||
// *Value_ListValue
|
|
||||||
Kind isValue_Kind `protobuf_oneof:"kind"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) Reset() { *m = Value{} }
|
|
||||||
func (m *Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Value) ProtoMessage() {}
|
|
||||||
func (*Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_df322afd6c9fb402, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Value) XXX_WellKnownType() string { return "Value" }
|
|
||||||
|
|
||||||
func (m *Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
type isValue_Kind interface {
|
|
||||||
isValue_Kind()
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_NullValue struct {
|
|
||||||
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_NumberValue struct {
|
|
||||||
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_StringValue struct {
|
|
||||||
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_BoolValue struct {
|
|
||||||
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_StructValue struct {
|
|
||||||
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Value_ListValue struct {
|
|
||||||
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Value_NullValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (*Value_NumberValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (*Value_StringValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (*Value_BoolValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (*Value_StructValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (*Value_ListValue) isValue_Kind() {}
|
|
||||||
|
|
||||||
func (m *Value) GetKind() isValue_Kind {
|
|
||||||
if m != nil {
|
|
||||||
return m.Kind
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetNullValue() NullValue {
|
|
||||||
if x, ok := m.GetKind().(*Value_NullValue); ok {
|
|
||||||
return x.NullValue
|
|
||||||
}
|
|
||||||
return NullValue_NULL_VALUE
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetNumberValue() float64 {
|
|
||||||
if x, ok := m.GetKind().(*Value_NumberValue); ok {
|
|
||||||
return x.NumberValue
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetStringValue() string {
|
|
||||||
if x, ok := m.GetKind().(*Value_StringValue); ok {
|
|
||||||
return x.StringValue
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetBoolValue() bool {
|
|
||||||
if x, ok := m.GetKind().(*Value_BoolValue); ok {
|
|
||||||
return x.BoolValue
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetStructValue() *Struct {
|
|
||||||
if x, ok := m.GetKind().(*Value_StructValue); ok {
|
|
||||||
return x.StructValue
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Value) GetListValue() *ListValue {
|
|
||||||
if x, ok := m.GetKind().(*Value_ListValue); ok {
|
|
||||||
return x.ListValue
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
|
||||||
func (*Value) XXX_OneofWrappers() []interface{} {
|
|
||||||
return []interface{}{
|
|
||||||
(*Value_NullValue)(nil),
|
|
||||||
(*Value_NumberValue)(nil),
|
|
||||||
(*Value_StringValue)(nil),
|
|
||||||
(*Value_BoolValue)(nil),
|
|
||||||
(*Value_StructValue)(nil),
|
|
||||||
(*Value_ListValue)(nil),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// `ListValue` is a wrapper around a repeated field of values.
|
|
||||||
//
|
|
||||||
// The JSON representation for `ListValue` is JSON array.
|
|
||||||
type ListValue struct {
|
|
||||||
// Repeated field of dynamically typed values.
|
|
||||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *ListValue) Reset() { *m = ListValue{} }
|
|
||||||
func (m *ListValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*ListValue) ProtoMessage() {}
|
|
||||||
func (*ListValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_df322afd6c9fb402, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
|
|
||||||
|
|
||||||
func (m *ListValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_ListValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *ListValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_ListValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *ListValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_ListValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *ListValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_ListValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_ListValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *ListValue) GetValues() []*Value {
|
|
||||||
if m != nil {
|
|
||||||
return m.Values
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
|
|
||||||
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
|
|
||||||
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
|
|
||||||
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
|
|
||||||
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
|
|
||||||
|
|
||||||
var fileDescriptor_df322afd6c9fb402 = []byte{
|
|
||||||
// 417 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
|
||||||
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
|
|
||||||
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
|
|
||||||
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
|
|
||||||
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
|
|
||||||
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
|
|
||||||
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
|
|
||||||
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
|
|
||||||
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
|
|
||||||
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
|
|
||||||
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
|
|
||||||
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
|
|
||||||
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
|
|
||||||
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
|
|
||||||
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
|
|
||||||
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
|
|
||||||
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
|
|
||||||
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
|
|
||||||
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
|
|
||||||
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
|
|
||||||
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
|
|
||||||
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
|
|
||||||
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
|
|
||||||
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
|
|
||||||
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
|
|
||||||
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
|
|
||||||
0x00,
|
|
||||||
}
|
|
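For illustration only (not part of the diff): the Struct and Value types generated above compose as follows. Each Value carries exactly one member of the Kind oneof, and Struct maps field names to *Value. The field names and values here are made up.

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name":    {Kind: &structpb.Value_StringValue{StringValue: "restic"}},
			"retries": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
			"dry_run": {Kind: &structpb.Value_BoolValue{BoolValue: false}},
		},
	}

	// The generated getters return the zero value when the oneof holds a
	// different kind, so they are safe to call without a type switch.
	fmt.Println(s.Fields["name"].GetStringValue())    // "restic"
	fmt.Println(s.Fields["retries"].GetNumberValue()) // 3
}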
96
vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
generated
vendored
@@ -1,96 +0,0 @@
|
||||||
// Protocol Buffers - Google's data interchange format
|
|
||||||
// Copyright 2008 Google Inc. All rights reserved.
|
|
||||||
// https://developers.google.com/protocol-buffers/
|
|
||||||
//
|
|
||||||
// Redistribution and use in source and binary forms, with or without
|
|
||||||
// modification, are permitted provided that the following conditions are
|
|
||||||
// met:
|
|
||||||
//
|
|
||||||
// * Redistributions of source code must retain the above copyright
|
|
||||||
// notice, this list of conditions and the following disclaimer.
|
|
||||||
// * Redistributions in binary form must reproduce the above
|
|
||||||
// copyright notice, this list of conditions and the following disclaimer
|
|
||||||
// in the documentation and/or other materials provided with the
|
|
||||||
// distribution.
|
|
||||||
// * Neither the name of Google Inc. nor the names of its
|
|
||||||
// contributors may be used to endorse or promote products derived from
|
|
||||||
// this software without specific prior written permission.
|
|
||||||
//
|
|
||||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.protobuf;
|
|
||||||
|
|
||||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
|
||||||
option cc_enable_arenas = true;
|
|
||||||
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
|
|
||||||
option java_package = "com.google.protobuf";
|
|
||||||
option java_outer_classname = "StructProto";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option objc_class_prefix = "GPB";
|
|
||||||
|
|
||||||
|
|
||||||
// `Struct` represents a structured data value, consisting of fields
|
|
||||||
// which map to dynamically typed values. In some languages, `Struct`
|
|
||||||
// might be supported by a native representation. For example, in
|
|
||||||
// scripting languages like JS a struct is represented as an
|
|
||||||
// object. The details of that representation are described together
|
|
||||||
// with the proto support for the language.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Struct` is JSON object.
|
|
||||||
message Struct {
|
|
||||||
// Unordered map of dynamically typed values.
|
|
||||||
map<string, Value> fields = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// `Value` represents a dynamically typed value which can be either
|
|
||||||
// null, a number, a string, a boolean, a recursive struct value, or a
|
|
||||||
// list of values. A producer of value is expected to set one of that
|
|
||||||
// variants, absence of any variant indicates an error.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Value` is JSON value.
|
|
||||||
message Value {
|
|
||||||
// The kind of value.
|
|
||||||
oneof kind {
|
|
||||||
// Represents a null value.
|
|
||||||
NullValue null_value = 1;
|
|
||||||
// Represents a double value.
|
|
||||||
double number_value = 2;
|
|
||||||
// Represents a string value.
|
|
||||||
string string_value = 3;
|
|
||||||
// Represents a boolean value.
|
|
||||||
bool bool_value = 4;
|
|
||||||
// Represents a structured value.
|
|
||||||
Struct struct_value = 5;
|
|
||||||
// Represents a repeated `Value`.
|
|
||||||
ListValue list_value = 6;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// `NullValue` is a singleton enumeration to represent the null value for the
|
|
||||||
// `Value` type union.
|
|
||||||
//
|
|
||||||
// The JSON representation for `NullValue` is JSON `null`.
|
|
||||||
enum NullValue {
|
|
||||||
// Null value.
|
|
||||||
NULL_VALUE = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// `ListValue` is a wrapper around a repeated field of values.
|
|
||||||
//
|
|
||||||
// The JSON representation for `ListValue` is JSON array.
|
|
||||||
message ListValue {
|
|
||||||
// Repeated field of dynamically typed values.
|
|
||||||
repeated Value values = 1;
|
|
||||||
}
|
|
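These proto3 definitions correspond to the Go types in struct.pb.go earlier in this diff. As a small sketch (assumptions: values are arbitrary examples), a ListValue nests other Values, including NULL_VALUE, mirroring a JSON array:

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	list := &structpb.ListValue{
		Values: []*structpb.Value{
			{Kind: &structpb.Value_StringValue{StringValue: "a"}},
			{Kind: &structpb.Value_NullValue{NullValue: structpb.NullValue_NULL_VALUE}},
		},
	}
	// Two elements: the string "a" and an explicit JSON null.
	fmt.Println(len(list.GetValues()))
}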
461
vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
generated
vendored
@@ -1,461 +0,0 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: google/protobuf/wrappers.proto
|
|
||||||
|
|
||||||
package wrappers
|
|
||||||
|
|
||||||
import (
|
|
||||||
fmt "fmt"
|
|
||||||
proto "github.com/golang/protobuf/proto"
|
|
||||||
math "math"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|
||||||
|
|
||||||
// Wrapper message for `double`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `DoubleValue` is JSON number.
|
|
||||||
type DoubleValue struct {
|
|
||||||
// The double value.
|
|
||||||
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *DoubleValue) Reset() { *m = DoubleValue{} }
|
|
||||||
func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*DoubleValue) ProtoMessage() {}
|
|
||||||
func (*DoubleValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
|
|
||||||
|
|
||||||
func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_DoubleValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_DoubleValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *DoubleValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_DoubleValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *DoubleValue) GetValue() float64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `float`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `FloatValue` is JSON number.
|
|
||||||
type FloatValue struct {
|
|
||||||
// The float value.
|
|
||||||
Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *FloatValue) Reset() { *m = FloatValue{} }
|
|
||||||
func (m *FloatValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*FloatValue) ProtoMessage() {}
|
|
||||||
func (*FloatValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
|
|
||||||
|
|
||||||
func (m *FloatValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_FloatValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_FloatValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_FloatValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *FloatValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_FloatValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_FloatValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *FloatValue) GetValue() float32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int64Value` is JSON string.
|
|
||||||
type Int64Value struct {
|
|
||||||
// The int64 value.
|
|
||||||
Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int64Value) Reset() { *m = Int64Value{} }
|
|
||||||
func (m *Int64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int64Value) ProtoMessage() {}
|
|
||||||
func (*Int64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
|
|
||||||
|
|
||||||
func (m *Int64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int64Value) GetValue() int64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint64`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt64Value` is JSON string.
|
|
||||||
type UInt64Value struct {
|
|
||||||
// The uint64 value.
|
|
||||||
Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt64Value) Reset() { *m = UInt64Value{} }
|
|
||||||
func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt64Value) ProtoMessage() {}
|
|
||||||
func (*UInt64Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
|
|
||||||
|
|
||||||
func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt64Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt64Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt64Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt64Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt64Value) GetValue() uint64 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `int32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `Int32Value` is JSON number.
|
|
||||||
type Int32Value struct {
|
|
||||||
// The int32 value.
|
|
||||||
Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Int32Value) Reset() { *m = Int32Value{} }
|
|
||||||
func (m *Int32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*Int32Value) ProtoMessage() {}
|
|
||||||
func (*Int32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
|
|
||||||
|
|
||||||
func (m *Int32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_Int32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_Int32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_Int32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *Int32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_Int32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_Int32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *Int32Value) GetValue() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `uint32`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `UInt32Value` is JSON number.
|
|
||||||
type UInt32Value struct {
|
|
||||||
// The uint32 value.
|
|
||||||
Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *UInt32Value) Reset() { *m = UInt32Value{} }
|
|
||||||
func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*UInt32Value) ProtoMessage() {}
|
|
||||||
func (*UInt32Value) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{5}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
|
|
||||||
|
|
||||||
func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_UInt32Value.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_UInt32Value.Size(m)
|
|
||||||
}
|
|
||||||
func (m *UInt32Value) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_UInt32Value.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *UInt32Value) GetValue() uint32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bool`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
|
||||||
type BoolValue struct {
|
|
||||||
// The bool value.
|
|
||||||
Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BoolValue) Reset() { *m = BoolValue{} }
|
|
||||||
func (m *BoolValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BoolValue) ProtoMessage() {}
|
|
||||||
func (*BoolValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{6}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
|
|
||||||
|
|
||||||
func (m *BoolValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BoolValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_BoolValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_BoolValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *BoolValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_BoolValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_BoolValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *BoolValue) GetValue() bool {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `string`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `StringValue` is JSON string.
|
|
||||||
type StringValue struct {
|
|
||||||
// The string value.
|
|
||||||
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StringValue) Reset() { *m = StringValue{} }
|
|
||||||
func (m *StringValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*StringValue) ProtoMessage() {}
|
|
||||||
func (*StringValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{7}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
|
|
||||||
|
|
||||||
func (m *StringValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_StringValue.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_StringValue.Merge(m, src)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_StringValue.Size(m)
|
|
||||||
}
|
|
||||||
func (m *StringValue) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_StringValue.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_StringValue proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *StringValue) GetValue() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Value
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper message for `bytes`.
|
|
||||||
//
|
|
||||||
// The JSON representation for `BytesValue` is JSON string.
|
|
||||||
type BytesValue struct {
|
|
||||||
// The bytes value.
|
|
||||||
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *BytesValue) Reset() { *m = BytesValue{} }
|
|
||||||
func (m *BytesValue) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*BytesValue) ProtoMessage() {}
|
|
||||||
func (*BytesValue) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_5377b62bda767935, []int{8}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
|
|
||||||
|
|
||||||
func (m *BytesValue) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_BytesValue.Unmarshal(m, b)
|
|
}
func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
}
func (m *BytesValue) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BytesValue.Merge(m, src)
}
func (m *BytesValue) XXX_Size() int {
	return xxx_messageInfo_BytesValue.Size(m)
}
func (m *BytesValue) XXX_DiscardUnknown() {
	xxx_messageInfo_BytesValue.DiscardUnknown(m)
}

var xxx_messageInfo_BytesValue proto.InternalMessageInfo

func (m *BytesValue) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

func init() {
	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
}

func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }

var fileDescriptor_5377b62bda767935 = []byte{
	// 259 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
	0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
	0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
	0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
	0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
	0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
	0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
	0x01, 0x00, 0x00,
}
118  vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto  generated  vendored
@@ -1,118 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Wrappers for primitive (non-message) types. These types are useful
// for embedding primitives in the `google.protobuf.Any` type and for places
// where we need to distinguish between the absence of a primitive
// typed field and its default value.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
option java_package = "com.google.protobuf";
option java_outer_classname = "WrappersProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";

// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
message DoubleValue {
  // The double value.
  double value = 1;
}

// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
message FloatValue {
  // The float value.
  float value = 1;
}

// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
message Int64Value {
  // The int64 value.
  int64 value = 1;
}

// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
message UInt64Value {
  // The uint64 value.
  uint64 value = 1;
}

// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
message Int32Value {
  // The int32 value.
  int32 value = 1;
}

// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
message UInt32Value {
  // The uint32 value.
  uint32 value = 1;
}

// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
message BoolValue {
  // The bool value.
  bool value = 1;
}

// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
message StringValue {
  // The string value.
  string value = 1;
}

// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
message BytesValue {
  // The bytes value.
  bytes value = 1;
}
27  vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt  generated  vendored
@@ -1,27 +0,0 @@
Copyright (c) 2015, Gengo, Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    * Neither the name of Gengo, Inc. nor the names of its
      contributors may be used to endorse or promote products derived from this
      software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22  vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel  generated  vendored
@@ -1,22 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")

package(default_visibility = ["//visibility:public"])

proto_library(
    name = "internal_proto",
    srcs = ["stream_chunk.proto"],
    deps = ["@com_google_protobuf//:any_proto"],
)

go_proto_library(
    name = "internal_go_proto",
    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
    proto = ":internal_proto",
)

go_library(
    name = "go_default_library",
    embed = [":internal_go_proto"],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
)
118  vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go  generated  vendored
@@ -1,118 +0,0 @@
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: internal/stream_chunk.proto
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
|
||||||
import fmt "fmt"
|
|
||||||
import math "math"
|
|
||||||
import any "github.com/golang/protobuf/ptypes/any"
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ = proto.Marshal
|
|
||||||
var _ = fmt.Errorf
|
|
||||||
var _ = math.Inf
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the proto package it is being compiled against.
|
|
||||||
// A compilation error at this line likely means your copy of the
|
|
||||||
// proto package needs to be updated.
|
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|
||||||
|
|
||||||
// StreamError is a response type which is returned when
|
|
||||||
// streaming rpc returns an error.
|
|
||||||
type StreamError struct {
|
|
||||||
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
|
|
||||||
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
|
|
||||||
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
|
|
||||||
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
|
|
||||||
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
|
|
||||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
|
||||||
XXX_unrecognized []byte `json:"-"`
|
|
||||||
XXX_sizecache int32 `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StreamError) Reset() { *m = StreamError{} }
|
|
||||||
func (m *StreamError) String() string { return proto.CompactTextString(m) }
|
|
||||||
func (*StreamError) ProtoMessage() {}
|
|
||||||
func (*StreamError) Descriptor() ([]byte, []int) {
|
|
||||||
return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
|
|
||||||
}
|
|
||||||
func (m *StreamError) XXX_Unmarshal(b []byte) error {
|
|
||||||
return xxx_messageInfo_StreamError.Unmarshal(m, b)
|
|
||||||
}
|
|
||||||
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
|
||||||
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
|
|
||||||
}
|
|
||||||
func (dst *StreamError) XXX_Merge(src proto.Message) {
|
|
||||||
xxx_messageInfo_StreamError.Merge(dst, src)
|
|
||||||
}
|
|
||||||
func (m *StreamError) XXX_Size() int {
|
|
||||||
return xxx_messageInfo_StreamError.Size(m)
|
|
||||||
}
|
|
||||||
func (m *StreamError) XXX_DiscardUnknown() {
|
|
||||||
xxx_messageInfo_StreamError.DiscardUnknown(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
var xxx_messageInfo_StreamError proto.InternalMessageInfo
|
|
||||||
|
|
||||||
func (m *StreamError) GetGrpcCode() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.GrpcCode
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StreamError) GetHttpCode() int32 {
|
|
||||||
if m != nil {
|
|
||||||
return m.HttpCode
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StreamError) GetMessage() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.Message
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StreamError) GetHttpStatus() string {
|
|
||||||
if m != nil {
|
|
||||||
return m.HttpStatus
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *StreamError) GetDetails() []*any.Any {
|
|
||||||
if m != nil {
|
|
||||||
return m.Details
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
|
|
||||||
// 223 bytes of a gzipped FileDescriptorProto
|
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
|
|
||||||
0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
|
|
||||||
0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
|
|
||||||
0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
|
|
||||||
0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
|
|
||||||
0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
|
|
||||||
0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
|
|
||||||
0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
|
|
||||||
0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
|
|
||||||
0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
|
|
||||||
0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
|
|
||||||
0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
|
|
||||||
0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
|
|
||||||
0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
|
|
||||||
}
|
|
15  vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto  generated  vendored
@@ -1,15 +0,0 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";

import "google/protobuf/any.proto";

// StreamError is a response type which is returned when
// streaming rpc returns an error.
message StreamError {
	int32 grpc_code = 1;
	int32 http_code = 2;
	string message = 3;
	string http_status = 4;
	repeated google.protobuf.Any details = 5;
}
80  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel  generated  vendored
@@ -1,80 +0,0 @@
||||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
|
||||||
|
|
||||||
package(default_visibility = ["//visibility:public"])
|
|
||||||
|
|
||||||
go_library(
|
|
||||||
name = "go_default_library",
|
|
||||||
srcs = [
|
|
||||||
"context.go",
|
|
||||||
"convert.go",
|
|
||||||
"doc.go",
|
|
||||||
"errors.go",
|
|
||||||
"fieldmask.go",
|
|
||||||
"handler.go",
|
|
||||||
"marshal_json.go",
|
|
||||||
"marshal_jsonpb.go",
|
|
||||||
"marshal_proto.go",
|
|
||||||
"marshaler.go",
|
|
||||||
"marshaler_registry.go",
|
|
||||||
"mux.go",
|
|
||||||
"pattern.go",
|
|
||||||
"proto2_convert.go",
|
|
||||||
"proto_errors.go",
|
|
||||||
"query.go",
|
|
||||||
],
|
|
||||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
|
|
||||||
deps = [
|
|
||||||
"//internal:go_default_library",
|
|
||||||
"//utilities:go_default_library",
|
|
||||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
|
||||||
"@com_github_golang_protobuf//proto:go_default_library",
|
|
||||||
"@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:any_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
|
||||||
"@org_golang_google_grpc//codes:go_default_library",
|
|
||||||
"@org_golang_google_grpc//grpclog:go_default_library",
|
|
||||||
"@org_golang_google_grpc//metadata:go_default_library",
|
|
||||||
"@org_golang_google_grpc//status:go_default_library",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
go_test(
|
|
||||||
name = "go_default_test",
|
|
||||||
size = "small",
|
|
||||||
srcs = [
|
|
||||||
"context_test.go",
|
|
||||||
"errors_test.go",
|
|
||||||
"fieldmask_test.go",
|
|
||||||
"handler_test.go",
|
|
||||||
"marshal_json_test.go",
|
|
||||||
"marshal_jsonpb_test.go",
|
|
||||||
"marshal_proto_test.go",
|
|
||||||
"marshaler_registry_test.go",
|
|
||||||
"mux_test.go",
|
|
||||||
"pattern_test.go",
|
|
||||||
"query_test.go",
|
|
||||||
],
|
|
||||||
embed = [":go_default_library"],
|
|
||||||
deps = [
|
|
||||||
"//examples/proto/examplepb:go_default_library",
|
|
||||||
"//internal:go_default_library",
|
|
||||||
"//utilities:go_default_library",
|
|
||||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
|
||||||
"@com_github_golang_protobuf//proto:go_default_library",
|
|
||||||
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
|
|
||||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
|
||||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
|
||||||
"@org_golang_google_grpc//:go_default_library",
|
|
||||||
"@org_golang_google_grpc//codes:go_default_library",
|
|
||||||
"@org_golang_google_grpc//metadata:go_default_library",
|
|
||||||
"@org_golang_google_grpc//status:go_default_library",
|
|
||||||
],
|
|
||||||
)
|
|
210  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go  generated  vendored
@@ -1,210 +0,0 @@
||||||
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
|
||||||
// parameters to or from a gRPC call.
|
|
||||||
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
|
||||||
|
|
||||||
// MetadataPrefix is prepended to permanent HTTP header keys (as specified
|
|
||||||
// by the IANA) when added to the gRPC context.
|
|
||||||
const MetadataPrefix = "grpcgateway-"
|
|
||||||
|
|
||||||
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
|
||||||
// HTTP headers in a response handled by grpc-gateway
|
|
||||||
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
|
||||||
|
|
||||||
const metadataGrpcTimeout = "Grpc-Timeout"
|
|
||||||
const metadataHeaderBinarySuffix = "-Bin"
|
|
||||||
|
|
||||||
const xForwardedFor = "X-Forwarded-For"
|
|
||||||
const xForwardedHost = "X-Forwarded-Host"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
|
||||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
|
||||||
DefaultContextTimeout = 0 * time.Second
|
|
||||||
)
|
|
||||||
|
|
||||||
func decodeBinHeader(v string) ([]byte, error) {
|
|
||||||
if len(v)%4 == 0 {
|
|
||||||
// Input was padded, or padding was not necessary.
|
|
||||||
return base64.StdEncoding.DecodeString(v)
|
|
||||||
}
|
|
||||||
return base64.RawStdEncoding.DecodeString(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
AnnotateContext adds context information such as metadata from the request.
|
|
||||||
|
|
||||||
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
|
|
||||||
except that the forwarded destination is not another HTTP service but rather
|
|
||||||
a gRPC service.
|
|
||||||
*/
|
|
||||||
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
|
|
||||||
var pairs []string
|
|
||||||
timeout := DefaultContextTimeout
|
|
||||||
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
|
||||||
var err error
|
|
||||||
timeout, err = timeoutDecode(tm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, vals := range req.Header {
|
|
||||||
for _, val := range vals {
|
|
||||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
|
||||||
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
|
||||||
if key == "Authorization" {
|
|
||||||
pairs = append(pairs, "authorization", val)
|
|
||||||
}
|
|
||||||
if h, ok := mux.incomingHeaderMatcher(key); ok {
|
|
||||||
// Handles "-bin" metadata in grpc, since grpc will do another base64
|
|
||||||
// encode before sending to server, we need to decode it first.
|
|
||||||
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
|
|
||||||
b, err := decodeBinHeader(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
val = string(b)
|
|
||||||
}
|
|
||||||
pairs = append(pairs, h, val)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if host := req.Header.Get(xForwardedHost); host != "" {
|
|
||||||
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
|
|
||||||
} else if req.Host != "" {
|
|
||||||
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
|
|
||||||
}
|
|
||||||
|
|
||||||
if addr := req.RemoteAddr; addr != "" {
|
|
||||||
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
|
|
||||||
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
|
|
||||||
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
|
|
||||||
} else {
|
|
||||||
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
grpclog.Infof("invalid remote addr: %s", addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if timeout != 0 {
|
|
||||||
ctx, _ = context.WithTimeout(ctx, timeout)
|
|
||||||
}
|
|
||||||
if len(pairs) == 0 {
|
|
||||||
return ctx, nil
|
|
||||||
}
|
|
||||||
md := metadata.Pairs(pairs...)
|
|
||||||
for _, mda := range mux.metadataAnnotators {
|
|
||||||
md = metadata.Join(md, mda(ctx, req))
|
|
||||||
}
|
|
||||||
return metadata.NewOutgoingContext(ctx, md), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerMetadata consists of metadata sent from gRPC server.
|
|
||||||
type ServerMetadata struct {
|
|
||||||
HeaderMD metadata.MD
|
|
||||||
TrailerMD metadata.MD
|
|
||||||
}
|
|
||||||
|
|
||||||
type serverMetadataKey struct{}
|
|
||||||
|
|
||||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
|
||||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
|
||||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
|
||||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
|
||||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeoutDecode(s string) (time.Duration, error) {
|
|
||||||
size := len(s)
|
|
||||||
if size < 2 {
|
|
||||||
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
|
||||||
}
|
|
||||||
d, ok := timeoutUnitToDuration(s[size-1])
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
|
||||||
}
|
|
||||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return d * time.Duration(t), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
|
||||||
switch u {
|
|
||||||
case 'H':
|
|
||||||
return time.Hour, true
|
|
||||||
case 'M':
|
|
||||||
return time.Minute, true
|
|
||||||
case 'S':
|
|
||||||
return time.Second, true
|
|
||||||
case 'm':
|
|
||||||
return time.Millisecond, true
|
|
||||||
case 'u':
|
|
||||||
return time.Microsecond, true
|
|
||||||
case 'n':
|
|
||||||
return time.Nanosecond, true
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
|
||||||
// permenant request headers maintained by IANA.
|
|
||||||
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
|
||||||
func isPermanentHTTPHeader(hdr string) bool {
|
|
||||||
switch hdr {
|
|
||||||
case
|
|
||||||
"Accept",
|
|
||||||
"Accept-Charset",
|
|
||||||
"Accept-Language",
|
|
||||||
"Accept-Ranges",
|
|
||||||
"Authorization",
|
|
||||||
"Cache-Control",
|
|
||||||
"Content-Type",
|
|
||||||
"Cookie",
|
|
||||||
"Date",
|
|
||||||
"Expect",
|
|
||||||
"From",
|
|
||||||
"Host",
|
|
||||||
"If-Match",
|
|
||||||
"If-Modified-Since",
|
|
||||||
"If-None-Match",
|
|
||||||
"If-Schedule-Tag-Match",
|
|
||||||
"If-Unmodified-Since",
|
|
||||||
"Max-Forwards",
|
|
||||||
"Origin",
|
|
||||||
"Pragma",
|
|
||||||
"Referer",
|
|
||||||
"User-Agent",
|
|
||||||
"Via",
|
|
||||||
"Warning":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
312  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go  generated  vendored
@@ -1,312 +0,0 @@
||||||
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/jsonpb"
|
|
||||||
"github.com/golang/protobuf/ptypes/duration"
|
|
||||||
"github.com/golang/protobuf/ptypes/timestamp"
|
|
||||||
"github.com/golang/protobuf/ptypes/wrappers"
|
|
||||||
)
|
|
||||||
|
|
||||||
// String just returns the given string.
|
|
||||||
// It is just for compatibility to other types.
|
|
||||||
func String(val string) (string, error) {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StringSlice converts 'val' where individual strings are separated by
|
|
||||||
// 'sep' into a string slice.
|
|
||||||
func StringSlice(val, sep string) ([]string, error) {
|
|
||||||
return strings.Split(val, sep), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bool converts the given string representation of a boolean value into bool.
|
|
||||||
func Bool(val string) (bool, error) {
|
|
||||||
return strconv.ParseBool(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolSlice converts 'val' where individual booleans are separated by
|
|
||||||
// 'sep' into a bool slice.
|
|
||||||
func BoolSlice(val, sep string) ([]bool, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]bool, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Bool(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64 converts the given string representation into representation of a floating point number into float64.
|
|
||||||
func Float64(val string) (float64, error) {
|
|
||||||
return strconv.ParseFloat(val, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64Slice converts 'val' where individual floating point numbers are separated by
|
|
||||||
// 'sep' into a float64 slice.
|
|
||||||
func Float64Slice(val, sep string) ([]float64, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]float64, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Float64(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float32 converts the given string representation of a floating point number into float32.
|
|
||||||
func Float32(val string) (float32, error) {
|
|
||||||
f, err := strconv.ParseFloat(val, 32)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return float32(f), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float32Slice converts 'val' where individual floating point numbers are separated by
|
|
||||||
// 'sep' into a float32 slice.
|
|
||||||
func Float32Slice(val, sep string) ([]float32, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]float32, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Float32(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64 converts the given string representation of an integer into int64.
|
|
||||||
func Int64(val string) (int64, error) {
|
|
||||||
return strconv.ParseInt(val, 0, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64Slice converts 'val' where individual integers are separated by
|
|
||||||
// 'sep' into a int64 slice.
|
|
||||||
func Int64Slice(val, sep string) ([]int64, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]int64, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Int64(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32 converts the given string representation of an integer into int32.
|
|
||||||
func Int32(val string) (int32, error) {
|
|
||||||
i, err := strconv.ParseInt(val, 0, 32)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return int32(i), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32Slice converts 'val' where individual integers are separated by
|
|
||||||
// 'sep' into a int32 slice.
|
|
||||||
func Int32Slice(val, sep string) ([]int32, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]int32, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Int32(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64 converts the given string representation of an integer into uint64.
|
|
||||||
func Uint64(val string) (uint64, error) {
|
|
||||||
return strconv.ParseUint(val, 0, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64Slice converts 'val' where individual integers are separated by
|
|
||||||
// 'sep' into a uint64 slice.
|
|
||||||
func Uint64Slice(val, sep string) ([]uint64, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]uint64, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Uint64(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint32 converts the given string representation of an integer into uint32.
|
|
||||||
func Uint32(val string) (uint32, error) {
|
|
||||||
i, err := strconv.ParseUint(val, 0, 32)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return uint32(i), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint32Slice converts 'val' where individual integers are separated by
|
|
||||||
// 'sep' into a uint32 slice.
|
|
||||||
func Uint32Slice(val, sep string) ([]uint32, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]uint32, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Uint32(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bytes converts the given string representation of a byte sequence into a slice of bytes
|
|
||||||
// A bytes sequence is encoded in URL-safe base64 without padding
|
|
||||||
func Bytes(val string) ([]byte, error) {
|
|
||||||
b, err := base64.StdEncoding.DecodeString(val)
|
|
||||||
if err != nil {
|
|
||||||
b, err = base64.URLEncoding.DecodeString(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
|
|
||||||
// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
|
|
||||||
func BytesSlice(val, sep string) ([][]byte, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([][]byte, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Bytes(v)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
|
|
||||||
func Timestamp(val string) (*timestamp.Timestamp, error) {
|
|
||||||
var r *timestamp.Timestamp
|
|
||||||
err := jsonpb.UnmarshalString(val, r)
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Duration converts the given string into a timestamp.Duration.
|
|
||||||
func Duration(val string) (*duration.Duration, error) {
|
|
||||||
var r *duration.Duration
|
|
||||||
err := jsonpb.UnmarshalString(val, r)
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enum converts the given string into an int32 that should be type casted into the
|
|
||||||
// correct enum proto type.
|
|
||||||
func Enum(val string, enumValMap map[string]int32) (int32, error) {
|
|
||||||
e, ok := enumValMap[val]
|
|
||||||
if ok {
|
|
||||||
return e, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
i, err := Int32(val)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("%s is not valid", val)
|
|
||||||
}
|
|
||||||
for _, v := range enumValMap {
|
|
||||||
if v == i {
|
|
||||||
return i, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, fmt.Errorf("%s is not valid", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnumSlice converts 'val' where individual enums are separated by 'sep'
|
|
||||||
// into a int32 slice. Each individual int32 should be type casted into the
|
|
||||||
// correct enum proto type.
|
|
||||||
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
|
||||||
s := strings.Split(val, sep)
|
|
||||||
values := make([]int32, len(s))
|
|
||||||
for i, v := range s {
|
|
||||||
value, err := Enum(v, enumValMap)
|
|
||||||
if err != nil {
|
|
||||||
return values, err
|
|
||||||
}
|
|
||||||
values[i] = value
|
|
||||||
}
|
|
||||||
return values, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Support fot google.protobuf.wrappers on top of primitive types
|
|
||||||
*/
|
|
||||||
|
|
||||||
// StringValue well-known type support as wrapper around string type
|
|
||||||
func StringValue(val string) (*wrappers.StringValue, error) {
|
|
||||||
return &wrappers.StringValue{Value: val}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FloatValue well-known type support as wrapper around float32 type
|
|
||||||
func FloatValue(val string) (*wrappers.FloatValue, error) {
|
|
||||||
parsedVal, err := Float32(val)
|
|
||||||
return &wrappers.FloatValue{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoubleValue well-known type support as wrapper around float64 type
|
|
||||||
func DoubleValue(val string) (*wrappers.DoubleValue, error) {
|
|
||||||
parsedVal, err := Float64(val)
|
|
||||||
return &wrappers.DoubleValue{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolValue well-known type support as wrapper around bool type
|
|
||||||
func BoolValue(val string) (*wrappers.BoolValue, error) {
|
|
||||||
parsedVal, err := Bool(val)
|
|
||||||
return &wrappers.BoolValue{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32Value well-known type support as wrapper around int32 type
|
|
||||||
func Int32Value(val string) (*wrappers.Int32Value, error) {
|
|
||||||
parsedVal, err := Int32(val)
|
|
||||||
return &wrappers.Int32Value{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// UInt32Value well-known type support as wrapper around uint32 type
|
|
||||||
func UInt32Value(val string) (*wrappers.UInt32Value, error) {
|
|
||||||
parsedVal, err := Uint32(val)
|
|
||||||
return &wrappers.UInt32Value{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64Value well-known type support as wrapper around int64 type
|
|
||||||
func Int64Value(val string) (*wrappers.Int64Value, error) {
|
|
||||||
parsedVal, err := Int64(val)
|
|
||||||
return &wrappers.Int64Value{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// UInt64Value well-known type support as wrapper around uint64 type
|
|
||||||
func UInt64Value(val string) (*wrappers.UInt64Value, error) {
|
|
||||||
parsedVal, err := Uint64(val)
|
|
||||||
return &wrappers.UInt64Value{Value: parsedVal}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// BytesValue well-known type support as wrapper around bytes[] type
|
|
||||||
func BytesValue(val string) (*wrappers.BytesValue, error) {
|
|
||||||
parsedVal, err := Bytes(val)
|
|
||||||
return &wrappers.BytesValue{Value: parsedVal}, err
|
|
||||||
}
|
|
5  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go  generated  vendored
@@ -1,5 +0,0 @@
/*
Package runtime contains runtime helper functions used by
servers which protoc-gen-grpc-gateway generates.
*/
package runtime
145  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go  generated  vendored
@@ -1,145 +0,0 @@
||||||
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/golang/protobuf/ptypes/any"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
|
||||||
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
|
|
||||||
func HTTPStatusFromCode(code codes.Code) int {
|
|
||||||
switch code {
|
|
||||||
case codes.OK:
|
|
||||||
return http.StatusOK
|
|
||||||
case codes.Canceled:
|
|
||||||
return http.StatusRequestTimeout
|
|
||||||
case codes.Unknown:
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
case codes.InvalidArgument:
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case codes.DeadlineExceeded:
|
|
||||||
return http.StatusGatewayTimeout
|
|
||||||
case codes.NotFound:
|
|
||||||
return http.StatusNotFound
|
|
||||||
case codes.AlreadyExists:
|
|
||||||
return http.StatusConflict
|
|
||||||
case codes.PermissionDenied:
|
|
||||||
return http.StatusForbidden
|
|
||||||
case codes.Unauthenticated:
|
|
||||||
return http.StatusUnauthorized
|
|
||||||
case codes.ResourceExhausted:
|
|
||||||
return http.StatusTooManyRequests
|
|
||||||
case codes.FailedPrecondition:
|
|
||||||
return http.StatusPreconditionFailed
|
|
||||||
case codes.Aborted:
|
|
||||||
return http.StatusConflict
|
|
||||||
case codes.OutOfRange:
|
|
||||||
return http.StatusBadRequest
|
|
||||||
case codes.Unimplemented:
|
|
||||||
return http.StatusNotImplemented
|
|
||||||
case codes.Internal:
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
case codes.Unavailable:
|
|
||||||
return http.StatusServiceUnavailable
|
|
||||||
case codes.DataLoss:
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
|
||||||
return http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// HTTPError replies to the request with the error.
|
|
||||||
// You can set a custom function to this variable to customize error format.
|
|
||||||
HTTPError = DefaultHTTPError
|
|
||||||
// OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest
|
|
||||||
OtherErrorHandler = DefaultOtherErrorHandler
|
|
||||||
)
|
|
||||||
|
|
||||||
type errorBody struct {
|
|
||||||
Error string `protobuf:"bytes,1,name=error" json:"error"`
|
|
||||||
// This is to make the error more compatible with users that expect errors to be Status objects:
|
|
||||||
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
|
|
||||||
// It should be the exact same message as the Error field.
|
|
||||||
Message string `protobuf:"bytes,1,name=message" json:"message"`
|
|
||||||
Code int32 `protobuf:"varint,2,name=code" json:"code"`
|
|
||||||
Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make this also conform to proto.Message for builtin JSONPb Marshaler
|
|
||||||
func (e *errorBody) Reset() { *e = errorBody{} }
|
|
||||||
func (e *errorBody) String() string { return proto.CompactTextString(e) }
|
|
||||||
func (*errorBody) ProtoMessage() {}
|
|
||||||
|
|
||||||
// DefaultHTTPError is the default implementation of HTTPError.
|
|
||||||
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
|
||||||
// If otherwise, it replies with http.StatusInternalServerError.
|
|
||||||
//
|
|
||||||
// The response body returned by this function is a JSON object,
|
|
||||||
// which contains a member whose key is "error" and whose value is err.Error().
|
|
||||||
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
|
|
||||||
const fallback = `{"error": "failed to marshal error message"}`
|
|
||||||
|
|
||||||
s, ok := status.FromError(err)
|
|
||||||
if !ok {
|
|
||||||
s = status.New(codes.Unknown, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Header().Del("Trailer")
|
|
||||||
|
|
||||||
contentType := marshaler.ContentType()
|
|
||||||
// Check marshaler on run time in order to keep backwards compatability
|
|
||||||
// An interface param needs to be added to the ContentType() function on
|
|
||||||
// the Marshal interface to be able to remove this check
|
|
||||||
if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
|
|
||||||
pb := s.Proto()
|
|
||||||
contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
|
|
||||||
body := &errorBody{
|
|
||||||
Error: s.Message(),
|
|
||||||
Message: s.Message(),
|
|
||||||
Code: int32(s.Code()),
|
|
||||||
Details: s.Proto().GetDetails(),
|
|
||||||
}
|
|
||||||
|
|
||||||
buf, merr := marshaler.Marshal(body)
|
|
||||||
if merr != nil {
|
|
||||||
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
if _, err := io.WriteString(w, fallback); err != nil {
|
|
||||||
grpclog.Infof("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
|
||||||
handleForwardResponseTrailerHeader(w, md)
|
|
||||||
st := HTTPStatusFromCode(s.Code())
|
|
||||||
w.WriteHeader(st)
|
|
||||||
if _, err := w.Write(buf); err != nil {
|
|
||||||
grpclog.Infof("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseTrailer(w, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
|
|
||||||
// It simply writes a string representation of the given error into "w".
|
|
||||||
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
|
|
||||||
http.Error(w, msg, code)
|
|
||||||
}
|
|
70  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go  generated  vendored
@@ -1,70 +0,0 @@
package runtime

import (
	"encoding/json"
	"io"
	"strings"

	"github.com/golang/protobuf/protoc-gen-go/generator"
	"google.golang.org/genproto/protobuf/field_mask"
)

// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) {
	fm := &field_mask.FieldMask{}
	var root interface{}
	if err := json.NewDecoder(r).Decode(&root); err != nil {
		if err == io.EOF {
			return fm, nil
		}
		return nil, err
	}

	queue := []fieldMaskPathItem{{node: root}}
	for len(queue) > 0 {
		// dequeue an item
		item := queue[0]
		queue = queue[1:]

		if m, ok := item.node.(map[string]interface{}); ok {
			// if the item is an object, then enqueue all of its children
			for k, v := range m {
				queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v})
			}
		} else if len(item.path) > 0 {
			// otherwise, it's a leaf node so print its path
			fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
		}
	}

	return fm, nil
}

// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
	// the list of prior fields leading up to node
	path []string

	// a generic decoded json object the current item to inspect for further path extraction
	node interface{}
}

// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic
// that's used for naming protobuf fields in Go.
func CamelCaseFieldMask(mask *field_mask.FieldMask) {
	if mask == nil || mask.Paths == nil {
		return
	}

	var newPaths []string
	for _, path := range mask.Paths {
		lowerCasedParts := strings.Split(path, ".")
		var camelCasedParts []string
		for _, part := range lowerCasedParts {
			camelCasedParts = append(camelCasedParts, generator.CamelCase(part))
		}
		newPaths = append(newPaths, strings.Join(camelCasedParts, "."))
	}

	mask.Paths = newPaths
}
215  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go  generated  vendored
@@ -1,215 +0,0 @@
||||||
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
|
|
||||||
"context"
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/golang/protobuf/ptypes/any"
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
|
||||||
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
|
||||||
f, ok := w.(http.Flusher)
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("Flush not supported in %T", w)
|
|
||||||
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
|
||||||
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
|
||||||
|
|
||||||
w.Header().Set("Transfer-Encoding", "chunked")
|
|
||||||
w.Header().Set("Content-Type", marshaler.ContentType())
|
|
||||||
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
|
||||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var delimiter []byte
|
|
||||||
if d, ok := marshaler.(Delimited); ok {
|
|
||||||
delimiter = d.Delimiter()
|
|
||||||
} else {
|
|
||||||
delimiter = []byte("\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
var wroteHeader bool
|
|
||||||
for {
|
|
||||||
resp, err := recv()
|
|
||||||
if err == io.EOF {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
|
||||||
handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
buf, err := marshaler.Marshal(streamChunk(resp, nil))
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
|
||||||
handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if _, err = w.Write(buf); err != nil {
|
|
||||||
grpclog.Infof("Failed to send response chunk: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
wroteHeader = true
|
|
||||||
if _, err = w.Write(delimiter); err != nil {
|
|
||||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
f.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
|
|
||||||
for k, vs := range md.HeaderMD {
|
|
||||||
if h, ok := mux.outgoingHeaderMatcher(k); ok {
|
|
||||||
for _, v := range vs {
|
|
||||||
w.Header().Add(h, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
|
||||||
for k := range md.TrailerMD {
|
|
||||||
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
|
||||||
w.Header().Add("Trailer", tKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
|
||||||
for k, vs := range md.TrailerMD {
|
|
||||||
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
|
||||||
for _, v := range vs {
|
|
||||||
w.Header().Add(tKey, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// responseBody interface contains method for getting field for marshaling to the response body
|
|
||||||
// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
|
|
||||||
type responseBody interface {
|
|
||||||
XXX_ResponseBody() interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
|
||||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
|
||||||
handleForwardResponseTrailerHeader(w, md)
|
|
||||||
|
|
||||||
contentType := marshaler.ContentType()
|
|
||||||
// Check marshaler on run time in order to keep backwards compatability
|
|
||||||
// An interface param needs to be added to the ContentType() function on
|
|
||||||
// the Marshal interface to be able to remove this check
|
|
||||||
if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
|
|
||||||
contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
|
|
||||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
|
||||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var buf []byte
|
|
||||||
var err error
|
|
||||||
if rb, ok := resp.(responseBody); ok {
|
|
||||||
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
|
|
||||||
} else {
|
|
||||||
buf, err = marshaler.Marshal(resp)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Infof("Marshal error: %v", err)
|
|
||||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = w.Write(buf); err != nil {
|
|
||||||
grpclog.Infof("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseTrailer(w, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
|
||||||
if len(opts) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for _, opt := range opts {
|
|
||||||
if err := opt(ctx, w, resp); err != nil {
|
|
||||||
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) {
|
|
||||||
buf, merr := marshaler.Marshal(streamChunk(nil, err))
|
|
||||||
if merr != nil {
|
|
||||||
grpclog.Infof("Failed to marshal an error: %v", merr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !wroteHeader {
|
|
||||||
s, ok := status.FromError(err)
|
|
||||||
if !ok {
|
|
||||||
s = status.New(codes.Unknown, err.Error())
|
|
||||||
}
|
|
||||||
w.WriteHeader(HTTPStatusFromCode(s.Code()))
|
|
||||||
}
|
|
||||||
if _, werr := w.Write(buf); werr != nil {
|
|
||||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func streamChunk(result proto.Message, err error) map[string]proto.Message {
|
|
||||||
if err != nil {
|
|
||||||
grpcCode := codes.Unknown
|
|
||||||
grpcMessage := err.Error()
|
|
||||||
var grpcDetails []*any.Any
|
|
||||||
if s, ok := status.FromError(err); ok {
|
|
||||||
grpcCode = s.Code()
|
|
||||||
grpcMessage = s.Message()
|
|
||||||
grpcDetails = s.Proto().GetDetails()
|
|
||||||
}
|
|
||||||
httpCode := HTTPStatusFromCode(grpcCode)
|
|
||||||
return map[string]proto.Message{
|
|
||||||
"error": &internal.StreamError{
|
|
||||||
GrpcCode: int32(grpcCode),
|
|
||||||
HttpCode: int32(httpCode),
|
|
||||||
Message: grpcMessage,
|
|
||||||
HttpStatus: http.StatusText(httpCode),
|
|
||||||
Details: grpcDetails,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if result == nil {
|
|
||||||
return streamChunk(nil, fmt.Errorf("empty response"))
|
|
||||||
}
|
|
||||||
return map[string]proto.Message{"result": result}
|
|
||||||
}
|
|
43  vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go  generated  vendored
@@ -1,43 +0,0 @@
||||||
package runtime

import (
	"google.golang.org/genproto/googleapis/api/httpbody"
)

// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
	serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
		Marshaler: &JSONPb{OrigName: true},
	}
}

// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
// google.api.HttpBody message as the full response body if it is
// the actual message used as the response. If not, then this will
// simply fallback to the Marshaler specified as its default Marshaler.
type HTTPBodyMarshaler struct {
	Marshaler
}

// ContentType implementation to keep backwards compatability with marshal interface
func (h *HTTPBodyMarshaler) ContentType() string {
	return h.ContentTypeFromMessage(nil)
}

// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
// its specified content type otherwise fall back to the default Marshaler.
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
	if httpBody, ok := v.(*httpbody.HttpBody); ok {
		return httpBody.GetContentType()
	}
	return h.Marshaler.ContentType()
}

// Marshal marshals "v" by returning the body bytes if v is a
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
	if httpBody, ok := v.(*httpbody.HttpBody); ok {
		return httpBody.Data, nil
	}
	return h.Marshaler.Marshal(v)
}
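
A minimal sketch of how the HTTPBodyMarshaler above is meant to be installed, using only functions shown in this diff (NewServeMux, SetHTTPBodyMarshaler); the listen address is illustrative:

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	mux := runtime.NewServeMux()
	// Replace the "*" wildcard marshaler with HTTPBodyMarshaler: responses whose
	// message is a google.api.HttpBody are written verbatim with their own
	// Content-Type, everything else falls back to the embedded JSONPb marshaler.
	runtime.SetHTTPBodyMarshaler(mux)
	_ = http.ListenAndServe(":8080", mux)
}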

45 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go (generated, vendored)
@@ -1,45 +0,0 @@
package runtime

import (
	"encoding/json"
	"io"
)

// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
// with the standard "encoding/json" package of Golang.
// Although it is generally faster for simple proto messages than JSONPb,
// it does not support advanced features of protobuf, e.g. map, oneof, ....
//
// The NewEncoder and NewDecoder types return *json.Encoder and
// *json.Decoder respectively.
type JSONBuiltin struct{}

// ContentType always Returns "application/json".
func (*JSONBuiltin) ContentType() string {
	return "application/json"
}

// Marshal marshals "v" into JSON
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Unmarshal unmarshals JSON data into "v".
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
	return json.NewDecoder(r)
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
	return json.NewEncoder(w)
}

// Delimiter for newline encoded JSON streams.
func (j *JSONBuiltin) Delimiter() []byte {
	return []byte("\n")
}

242 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go (generated, vendored)
@@ -1,242 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/jsonpb"
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
|
||||||
// with the "github.com/golang/protobuf/jsonpb".
|
|
||||||
// It supports fully functionality of protobuf unlike JSONBuiltin.
|
|
||||||
//
|
|
||||||
// The NewDecoder method returns a DecoderWrapper, so the underlying
|
|
||||||
// *json.Decoder methods can be used.
|
|
||||||
type JSONPb jsonpb.Marshaler
|
|
||||||
|
|
||||||
// ContentType always returns "application/json".
|
|
||||||
func (*JSONPb) ContentType() string {
|
|
||||||
return "application/json"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal marshals "v" into JSON.
|
|
||||||
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
|
||||||
if _, ok := v.(proto.Message); !ok {
|
|
||||||
return j.marshalNonProtoField(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := j.marshalTo(&buf, v); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
|
||||||
p, ok := v.(proto.Message)
|
|
||||||
if !ok {
|
|
||||||
buf, err := j.marshalNonProtoField(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.Write(buf)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return (*jsonpb.Marshaler)(j).Marshal(w, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
|
|
||||||
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
|
||||||
)
|
|
||||||
|
|
||||||
// marshalNonProto marshals a non-message field of a protobuf message.
|
|
||||||
// This function does not correctly marshals arbitrary data structure into JSON,
|
|
||||||
// but it is only capable of marshaling non-message field values of protobuf,
|
|
||||||
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
|
||||||
// integer/string types to primitives/enums/pointers to messages.
|
|
||||||
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
|
||||||
if v == nil {
|
|
||||||
return []byte("null"), nil
|
|
||||||
}
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
for rv.Kind() == reflect.Ptr {
|
|
||||||
if rv.IsNil() {
|
|
||||||
return []byte("null"), nil
|
|
||||||
}
|
|
||||||
rv = rv.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
if rv.Kind() == reflect.Slice {
|
|
||||||
if rv.IsNil() {
|
|
||||||
if j.EmitDefaults {
|
|
||||||
return []byte("[]"), nil
|
|
||||||
}
|
|
||||||
return []byte("null"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if rv.Type().Elem().Implements(protoMessageType) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
err := buf.WriteByte('[')
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for i := 0; i < rv.Len(); i++ {
|
|
||||||
if i != 0 {
|
|
||||||
err = buf.WriteByte(',')
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = buf.WriteByte(']')
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if rv.Kind() == reflect.Map {
|
|
||||||
m := make(map[string]*json.RawMessage)
|
|
||||||
for _, k := range rv.MapKeys() {
|
|
||||||
buf, err := j.Marshal(rv.MapIndex(k).Interface())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
|
||||||
}
|
|
||||||
if j.Indent != "" {
|
|
||||||
return json.MarshalIndent(m, "", j.Indent)
|
|
||||||
}
|
|
||||||
return json.Marshal(m)
|
|
||||||
}
|
|
||||||
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
|
|
||||||
return json.Marshal(enum.String())
|
|
||||||
}
|
|
||||||
return json.Marshal(rv.Interface())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal unmarshals JSON "data" into "v"
|
|
||||||
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
|
||||||
return unmarshalJSONPb(data, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
|
||||||
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
|
|
||||||
d := json.NewDecoder(r)
|
|
||||||
return DecoderWrapper{Decoder: d}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecoderWrapper is a wrapper around a *json.Decoder that adds
|
|
||||||
// support for protos to the Decode method.
|
|
||||||
type DecoderWrapper struct {
|
|
||||||
*json.Decoder
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode wraps the embedded decoder's Decode method to support
|
|
||||||
// protos using a jsonpb.Unmarshaler.
|
|
||||||
func (d DecoderWrapper) Decode(v interface{}) error {
|
|
||||||
return decodeJSONPb(d.Decoder, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
|
||||||
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
|
|
||||||
return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalJSONPb(data []byte, v interface{}) error {
|
|
||||||
d := json.NewDecoder(bytes.NewReader(data))
|
|
||||||
return decodeJSONPb(d, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeJSONPb(d *json.Decoder, v interface{}) error {
|
|
||||||
p, ok := v.(proto.Message)
|
|
||||||
if !ok {
|
|
||||||
return decodeNonProtoField(d, v)
|
|
||||||
}
|
|
||||||
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
|
|
||||||
return unmarshaler.UnmarshalNext(d, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if rv.Kind() != reflect.Ptr {
|
|
||||||
return fmt.Errorf("%T is not a pointer", v)
|
|
||||||
}
|
|
||||||
for rv.Kind() == reflect.Ptr {
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.New(rv.Type().Elem()))
|
|
||||||
}
|
|
||||||
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
|
||||||
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
|
|
||||||
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
|
|
||||||
}
|
|
||||||
rv = rv.Elem()
|
|
||||||
}
|
|
||||||
if rv.Kind() == reflect.Map {
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.MakeMap(rv.Type()))
|
|
||||||
}
|
|
||||||
conv, ok := convFromType[rv.Type().Key().Kind()]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
|
|
||||||
}
|
|
||||||
|
|
||||||
m := make(map[string]*json.RawMessage)
|
|
||||||
if err := d.Decode(&m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for k, v := range m {
|
|
||||||
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
|
|
||||||
if err := result[1].Interface(); err != nil {
|
|
||||||
return err.(error)
|
|
||||||
}
|
|
||||||
bk := result[0]
|
|
||||||
bv := reflect.New(rv.Type().Elem())
|
|
||||||
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rv.SetMapIndex(bk, bv.Elem())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if _, ok := rv.Interface().(protoEnum); ok {
|
|
||||||
var repr interface{}
|
|
||||||
if err := d.Decode(&repr); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch repr.(type) {
|
|
||||||
case string:
|
|
||||||
// TODO(yugui) Should use proto.StructProperties?
|
|
||||||
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
|
|
||||||
case float64:
|
|
||||||
rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return d.Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
type protoEnum interface {
|
|
||||||
fmt.Stringer
|
|
||||||
EnumDescriptor() ([]byte, []int)
|
|
||||||
}
|
|
||||||
|
|
||||||
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
|
||||||
|
|
||||||
// Delimiter for newline encoded JSON streams.
|
|
||||||
func (j *JSONPb) Delimiter() []byte {
|
|
||||||
return []byte("\n")
|
|
||||||
}
|
|

62 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go (generated, vendored)
@@ -1,62 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"errors"
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"io/ioutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
|
|
||||||
type ProtoMarshaller struct{}
|
|
||||||
|
|
||||||
// ContentType always returns "application/octet-stream".
|
|
||||||
func (*ProtoMarshaller) ContentType() string {
|
|
||||||
return "application/octet-stream"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal marshals "value" into Proto
|
|
||||||
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
|
|
||||||
message, ok := value.(proto.Message)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("unable to marshal non proto field")
|
|
||||||
}
|
|
||||||
return proto.Marshal(message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal unmarshals proto "data" into "value"
|
|
||||||
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
|
|
||||||
message, ok := value.(proto.Message)
|
|
||||||
if !ok {
|
|
||||||
return errors.New("unable to unmarshal non proto field")
|
|
||||||
}
|
|
||||||
return proto.Unmarshal(data, message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDecoder returns a Decoder which reads proto stream from "reader".
|
|
||||||
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
|
|
||||||
return DecoderFunc(func(value interface{}) error {
|
|
||||||
buffer, err := ioutil.ReadAll(reader)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return marshaller.Unmarshal(buffer, value)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncoder returns an Encoder which writes proto stream into "writer".
|
|
||||||
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
|
|
||||||
return EncoderFunc(func(value interface{}) error {
|
|
||||||
buffer, err := marshaller.Marshal(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = writer.Write(buffer)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|

48 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go (generated, vendored)
@@ -1,48 +0,0 @@
package runtime

import (
	"io"
)

// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
	// Marshal marshals "v" into byte sequence.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal unmarshals "data" into "v".
	// "v" must be a pointer value.
	Unmarshal(data []byte, v interface{}) error
	// NewDecoder returns a Decoder which reads byte sequence from "r".
	NewDecoder(r io.Reader) Decoder
	// NewEncoder returns an Encoder which writes bytes sequence into "w".
	NewEncoder(w io.Writer) Encoder
	// ContentType returns the Content-Type which this marshaler is responsible for.
	ContentType() string
}

// Decoder decodes a byte sequence
type Decoder interface {
	Decode(v interface{}) error
}

// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
	Encode(v interface{}) error
}

// DecoderFunc adapts an decoder function into Decoder.
type DecoderFunc func(v interface{}) error

// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }

// EncoderFunc adapts an encoder function into Encoder
type EncoderFunc func(v interface{}) error

// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }

// Delimited defines the streaming delimiter.
type Delimited interface {
	// Delimiter returns the record seperator for the stream.
	Delimiter() []byte
}
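
The Marshaler interface above is what WithMarshalerOption (see marshaler_registry.go below) binds to a Content-Type. A minimal sketch, assuming the import path used throughout this vendor tree, that registers the ProtoMarshaller shown earlier in this diff for "application/octet-stream":

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Requests and responses with Content-Type/Accept "application/octet-stream"
	// are then handled by ProtoMarshaller; all other MIME types keep the default
	// JSONPb marshaler registered under the MIMEWildcard ("*") entry.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)
	_ = mux
}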

91 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go (generated, vendored)
@@ -1,91 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MIMEWildcard is the fallback MIME type used for requests which do not match
|
|
||||||
// a registered MIME type.
|
|
||||||
const MIMEWildcard = "*"
|
|
||||||
|
|
||||||
var (
|
|
||||||
acceptHeader = http.CanonicalHeaderKey("Accept")
|
|
||||||
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
|
|
||||||
|
|
||||||
defaultMarshaler = &JSONPb{OrigName: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
|
||||||
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
|
||||||
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
|
||||||
// If there are multiple Content-Type headers set, choose the first one that it can
|
|
||||||
// exactly match in the registry.
|
|
||||||
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
|
||||||
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
|
||||||
for _, acceptVal := range r.Header[acceptHeader] {
|
|
||||||
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
|
||||||
outbound = m
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
|
||||||
if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
|
|
||||||
inbound = m
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if inbound == nil {
|
|
||||||
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
|
||||||
}
|
|
||||||
if outbound == nil {
|
|
||||||
outbound = inbound
|
|
||||||
}
|
|
||||||
|
|
||||||
return inbound, outbound
|
|
||||||
}
|
|
||||||
|
|
||||||
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
|
||||||
type marshalerRegistry struct {
|
|
||||||
mimeMap map[string]Marshaler
|
|
||||||
}
|
|
||||||
|
|
||||||
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
|
||||||
// MIME type).
|
|
||||||
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
|
||||||
if len(mime) == 0 {
|
|
||||||
return errors.New("empty MIME type")
|
|
||||||
}
|
|
||||||
|
|
||||||
m.mimeMap[mime] = marshaler
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeMarshalerMIMERegistry returns a new registry of marshalers.
|
|
||||||
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
|
|
||||||
//
|
|
||||||
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
|
|
||||||
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
|
|
||||||
// with a "application/json" Content-Type.
|
|
||||||
// "*" can be used to match any Content-Type.
|
|
||||||
// This can be attached to a ServerMux with the marshaler option.
|
|
||||||
func makeMarshalerMIMERegistry() marshalerRegistry {
|
|
||||||
return marshalerRegistry{
|
|
||||||
mimeMap: map[string]Marshaler{
|
|
||||||
MIMEWildcard: defaultMarshaler,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
|
||||||
// Marshalers to a MIME type in mux.
|
|
||||||
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
|
||||||
return func(mux *ServeMux) {
|
|
||||||
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|

268 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go (generated, vendored)
@@ -1,268 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/textproto"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
|
||||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
|
||||||
|
|
||||||
// ServeMux is a request multiplexer for grpc-gateway.
|
|
||||||
// It matches http requests to patterns and invokes the corresponding handler.
|
|
||||||
type ServeMux struct {
|
|
||||||
// handlers maps HTTP method to a list of handlers.
|
|
||||||
handlers map[string][]handler
|
|
||||||
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
|
|
||||||
marshalers marshalerRegistry
|
|
||||||
incomingHeaderMatcher HeaderMatcherFunc
|
|
||||||
outgoingHeaderMatcher HeaderMatcherFunc
|
|
||||||
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
|
|
||||||
protoErrorHandler ProtoErrorHandlerFunc
|
|
||||||
disablePathLengthFallback bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeMuxOption is an option that can be given to a ServeMux on construction.
|
|
||||||
type ServeMuxOption func(*ServeMux)
|
|
||||||
|
|
||||||
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
|
||||||
//
|
|
||||||
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
|
||||||
// http.ResponseWriter, and proto.Message before every forwarded response.
|
|
||||||
//
|
|
||||||
// The message may be nil in the case where just a header is being sent.
|
|
||||||
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
|
||||||
return func(serveMux *ServeMux) {
|
|
||||||
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
|
|
||||||
type HeaderMatcherFunc func(string) (string, bool)
|
|
||||||
|
|
||||||
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
|
||||||
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
|
|
||||||
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
|
|
||||||
func DefaultHeaderMatcher(key string) (string, bool) {
|
|
||||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
|
||||||
if isPermanentHTTPHeader(key) {
|
|
||||||
return MetadataPrefix + key, true
|
|
||||||
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
|
||||||
return key[len(MetadataHeaderPrefix):], true
|
|
||||||
}
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
|
|
||||||
//
|
|
||||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
|
||||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
|
||||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
|
||||||
return func(mux *ServeMux) {
|
|
||||||
mux.incomingHeaderMatcher = fn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
|
||||||
//
|
|
||||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
|
||||||
// passed to http response returned from gateway. To transform the header before passing to response,
|
|
||||||
// matcher should return modified header.
|
|
||||||
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
|
||||||
return func(mux *ServeMux) {
|
|
||||||
mux.outgoingHeaderMatcher = fn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
|
|
||||||
//
|
|
||||||
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
|
|
||||||
// is reading token from cookie and adding it in gRPC context.
|
|
||||||
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
|
|
||||||
return func(serveMux *ServeMux) {
|
|
||||||
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context.
|
|
||||||
//
|
|
||||||
// This can be used to handle an error as general proto message defined by gRPC.
|
|
||||||
// The response including body and status is not backward compatible with the default error handler.
|
|
||||||
// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
|
|
||||||
func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
|
|
||||||
return func(serveMux *ServeMux) {
|
|
||||||
serveMux.protoErrorHandler = fn
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
|
|
||||||
func WithDisablePathLengthFallback() ServeMuxOption {
|
|
||||||
return func(serveMux *ServeMux) {
|
|
||||||
serveMux.disablePathLengthFallback = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
|
||||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
|
||||||
serveMux := &ServeMux{
|
|
||||||
handlers: make(map[string][]handler),
|
|
||||||
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
|
||||||
marshalers: makeMarshalerMIMERegistry(),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt(serveMux)
|
|
||||||
}
|
|
||||||
|
|
||||||
if serveMux.protoErrorHandler != nil {
|
|
||||||
HTTPError = serveMux.protoErrorHandler
|
|
||||||
// OtherErrorHandler is no longer used when protoErrorHandler is set.
|
|
||||||
// Overwritten by a special error handler to return Unknown.
|
|
||||||
OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
|
|
||||||
ctx := context.Background()
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(serveMux, r)
|
|
||||||
sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
|
|
||||||
serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if serveMux.incomingHeaderMatcher == nil {
|
|
||||||
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
|
|
||||||
}
|
|
||||||
|
|
||||||
if serveMux.outgoingHeaderMatcher == nil {
|
|
||||||
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
|
|
||||||
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return serveMux
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle associates "h" to the pair of HTTP method and path pattern.
|
|
||||||
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
|
||||||
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
|
||||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
|
|
||||||
path := r.URL.Path
|
|
||||||
if !strings.HasPrefix(path, "/") {
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
components := strings.Split(path[1:], "/")
|
|
||||||
l := len(components)
|
|
||||||
var verb string
|
|
||||||
if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
} else if idx > 0 {
|
|
||||||
c := components[l-1]
|
|
||||||
components[l-1], verb = c[:idx], c[idx+1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
|
||||||
r.Method = strings.ToUpper(override)
|
|
||||||
if err := r.ParseForm(); err != nil {
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, h := range s.handlers[r.Method] {
|
|
||||||
pathParams, err := h.pat.Match(components, verb)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
h.h(w, r, pathParams)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// lookup other methods to handle fallback from GET to POST and
|
|
||||||
// to determine if it is MethodNotAllowed or NotFound.
|
|
||||||
for m, handlers := range s.handlers {
|
|
||||||
if m == r.Method {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, h := range handlers {
|
|
||||||
pathParams, err := h.pat.Match(components, verb)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
|
||||||
if s.isPathLengthFallback(r) {
|
|
||||||
if err := r.ParseForm(); err != nil {
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.h(w, r, pathParams)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed))
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.protoErrorHandler != nil {
|
|
||||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
|
||||||
sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
|
|
||||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
|
||||||
} else {
|
|
||||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
|
||||||
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
|
||||||
return s.forwardResponseOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
|
|
||||||
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
|
||||||
}
|
|
||||||
|
|
||||||
type handler struct {
|
|
||||||
pat Pattern
|
|
||||||
h HandlerFunc
|
|
||||||
}
|
|

227 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go (generated, vendored)
@@ -1,227 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
|
|
||||||
ErrNotMatch = errors.New("not match to the path pattern")
|
|
||||||
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
|
|
||||||
ErrInvalidPattern = errors.New("invalid pattern")
|
|
||||||
)
|
|
||||||
|
|
||||||
type op struct {
|
|
||||||
code utilities.OpCode
|
|
||||||
operand int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
|
|
||||||
type Pattern struct {
|
|
||||||
// ops is a list of operations
|
|
||||||
ops []op
|
|
||||||
// pool is a constant pool indexed by the operands or vars.
|
|
||||||
pool []string
|
|
||||||
// vars is a list of variables names to be bound by this pattern
|
|
||||||
vars []string
|
|
||||||
// stacksize is the max depth of the stack
|
|
||||||
stacksize int
|
|
||||||
// tailLen is the length of the fixed-size segments after a deep wildcard
|
|
||||||
tailLen int
|
|
||||||
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
|
|
||||||
verb string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPattern returns a new Pattern from the given definition values.
|
|
||||||
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
|
||||||
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
|
||||||
// "version" must be 1 for now.
|
|
||||||
// It returns an error if the given definition is invalid.
|
|
||||||
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
|
|
||||||
if version != 1 {
|
|
||||||
grpclog.Infof("unsupported version: %d", version)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
|
|
||||||
l := len(ops)
|
|
||||||
if l%2 != 0 {
|
|
||||||
grpclog.Infof("odd number of ops codes: %d", l)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
typedOps []op
|
|
||||||
stack, maxstack int
|
|
||||||
tailLen int
|
|
||||||
pushMSeen bool
|
|
||||||
vars []string
|
|
||||||
)
|
|
||||||
for i := 0; i < l; i += 2 {
|
|
||||||
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
|
||||||
switch op.code {
|
|
||||||
case utilities.OpNop:
|
|
||||||
continue
|
|
||||||
case utilities.OpPush:
|
|
||||||
if pushMSeen {
|
|
||||||
tailLen++
|
|
||||||
}
|
|
||||||
stack++
|
|
||||||
case utilities.OpPushM:
|
|
||||||
if pushMSeen {
|
|
||||||
grpclog.Infof("pushM appears twice")
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
pushMSeen = true
|
|
||||||
stack++
|
|
||||||
case utilities.OpLitPush:
|
|
||||||
if op.operand < 0 || len(pool) <= op.operand {
|
|
||||||
grpclog.Infof("negative literal index: %d", op.operand)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
if pushMSeen {
|
|
||||||
tailLen++
|
|
||||||
}
|
|
||||||
stack++
|
|
||||||
case utilities.OpConcatN:
|
|
||||||
if op.operand <= 0 {
|
|
||||||
grpclog.Infof("negative concat size: %d", op.operand)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
stack -= op.operand
|
|
||||||
if stack < 0 {
|
|
||||||
grpclog.Print("stack underflow")
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
stack++
|
|
||||||
case utilities.OpCapture:
|
|
||||||
if op.operand < 0 || len(pool) <= op.operand {
|
|
||||||
grpclog.Infof("variable name index out of bound: %d", op.operand)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
v := pool[op.operand]
|
|
||||||
op.operand = len(vars)
|
|
||||||
vars = append(vars, v)
|
|
||||||
stack--
|
|
||||||
if stack < 0 {
|
|
||||||
grpclog.Infof("stack underflow")
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
grpclog.Infof("invalid opcode: %d", op.code)
|
|
||||||
return Pattern{}, ErrInvalidPattern
|
|
||||||
}
|
|
||||||
|
|
||||||
if maxstack < stack {
|
|
||||||
maxstack = stack
|
|
||||||
}
|
|
||||||
typedOps = append(typedOps, op)
|
|
||||||
}
|
|
||||||
return Pattern{
|
|
||||||
ops: typedOps,
|
|
||||||
pool: pool,
|
|
||||||
vars: vars,
|
|
||||||
stacksize: maxstack,
|
|
||||||
tailLen: tailLen,
|
|
||||||
verb: verb,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
|
||||||
func MustPattern(p Pattern, err error) Pattern {
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match examines components if it matches to the Pattern.
|
|
||||||
// If it matches, the function returns a mapping from field paths to their captured values.
|
|
||||||
// If otherwise, the function returns an error.
|
|
||||||
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
|
||||||
if p.verb != verb {
|
|
||||||
return nil, ErrNotMatch
|
|
||||||
}
|
|
||||||
|
|
||||||
var pos int
|
|
||||||
stack := make([]string, 0, p.stacksize)
|
|
||||||
captured := make([]string, len(p.vars))
|
|
||||||
l := len(components)
|
|
||||||
for _, op := range p.ops {
|
|
||||||
switch op.code {
|
|
||||||
case utilities.OpNop:
|
|
||||||
continue
|
|
||||||
case utilities.OpPush, utilities.OpLitPush:
|
|
||||||
if pos >= l {
|
|
||||||
return nil, ErrNotMatch
|
|
||||||
}
|
|
||||||
c := components[pos]
|
|
||||||
if op.code == utilities.OpLitPush {
|
|
||||||
if lit := p.pool[op.operand]; c != lit {
|
|
||||||
return nil, ErrNotMatch
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stack = append(stack, c)
|
|
||||||
pos++
|
|
||||||
case utilities.OpPushM:
|
|
||||||
end := len(components)
|
|
||||||
if end < pos+p.tailLen {
|
|
||||||
return nil, ErrNotMatch
|
|
||||||
}
|
|
||||||
end -= p.tailLen
|
|
||||||
stack = append(stack, strings.Join(components[pos:end], "/"))
|
|
||||||
pos = end
|
|
||||||
case utilities.OpConcatN:
|
|
||||||
n := op.operand
|
|
||||||
l := len(stack) - n
|
|
||||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
|
||||||
case utilities.OpCapture:
|
|
||||||
n := len(stack) - 1
|
|
||||||
captured[op.operand] = stack[n]
|
|
||||||
stack = stack[:n]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pos < l {
|
|
||||||
return nil, ErrNotMatch
|
|
||||||
}
|
|
||||||
bindings := make(map[string]string)
|
|
||||||
for i, val := range captured {
|
|
||||||
bindings[p.vars[i]] = val
|
|
||||||
}
|
|
||||||
return bindings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verb returns the verb part of the Pattern.
|
|
||||||
func (p Pattern) Verb() string { return p.verb }
|
|
||||||
|
|
||||||
func (p Pattern) String() string {
|
|
||||||
var stack []string
|
|
||||||
for _, op := range p.ops {
|
|
||||||
switch op.code {
|
|
||||||
case utilities.OpNop:
|
|
||||||
continue
|
|
||||||
case utilities.OpPush:
|
|
||||||
stack = append(stack, "*")
|
|
||||||
case utilities.OpLitPush:
|
|
||||||
stack = append(stack, p.pool[op.operand])
|
|
||||||
case utilities.OpPushM:
|
|
||||||
stack = append(stack, "**")
|
|
||||||
case utilities.OpConcatN:
|
|
||||||
n := op.operand
|
|
||||||
l := len(stack) - n
|
|
||||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
|
||||||
case utilities.OpCapture:
|
|
||||||
n := len(stack) - 1
|
|
||||||
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
segs := strings.Join(stack, "/")
|
|
||||||
if p.verb != "" {
|
|
||||||
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
|
||||||
}
|
|
||||||
return "/" + segs
|
|
||||||
}
|
|

80 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go (generated, vendored)
@@ -1,80 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StringP returns a pointer to a string whose pointee is same as the given string value.
|
|
||||||
func StringP(val string) (*string, error) {
|
|
||||||
return proto.String(val), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// BoolP parses the given string representation of a boolean value,
|
|
||||||
// and returns a pointer to a bool whose value is same as the parsed value.
|
|
||||||
func BoolP(val string) (*bool, error) {
|
|
||||||
b, err := Bool(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Bool(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float64P parses the given string representation of a floating point number,
|
|
||||||
// and returns a pointer to a float64 whose value is same as the parsed number.
|
|
||||||
func Float64P(val string) (*float64, error) {
|
|
||||||
f, err := Float64(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Float64(f), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float32P parses the given string representation of a floating point number,
|
|
||||||
// and returns a pointer to a float32 whose value is same as the parsed number.
|
|
||||||
func Float32P(val string) (*float32, error) {
|
|
||||||
f, err := Float32(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Float32(f), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64P parses the given string representation of an integer
|
|
||||||
// and returns a pointer to a int64 whose value is same as the parsed integer.
|
|
||||||
func Int64P(val string) (*int64, error) {
|
|
||||||
i, err := Int64(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Int64(i), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int32P parses the given string representation of an integer
|
|
||||||
// and returns a pointer to a int32 whose value is same as the parsed integer.
|
|
||||||
func Int32P(val string) (*int32, error) {
|
|
||||||
i, err := Int32(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Int32(i), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint64P parses the given string representation of an integer
|
|
||||||
// and returns a pointer to a uint64 whose value is same as the parsed integer.
|
|
||||||
func Uint64P(val string) (*uint64, error) {
|
|
||||||
i, err := Uint64(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Uint64(i), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uint32P parses the given string representation of an integer
|
|
||||||
// and returns a pointer to a uint32 whose value is same as the parsed integer.
|
|
||||||
func Uint32P(val string) (*uint32, error) {
|
|
||||||
i, err := Uint32(val)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.Uint32(i), err
|
|
||||||
}
|
|

70 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go (generated, vendored)
@@ -1,70 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"context"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
|
|
||||||
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
|
|
||||||
|
|
||||||
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
|
|
||||||
|
|
||||||
// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
|
|
||||||
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
|
||||||
// If otherwise, it replies with http.StatusInternalServerError.
|
|
||||||
//
|
|
||||||
// The response body returned by this function is a Status message marshaled by a Marshaler.
|
|
||||||
//
|
|
||||||
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
|
|
||||||
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
|
|
||||||
// return Internal when Marshal failed
|
|
||||||
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
|
|
||||||
|
|
||||||
s, ok := status.FromError(err)
|
|
||||||
if !ok {
|
|
||||||
s = status.New(codes.Unknown, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Header().Del("Trailer")
|
|
||||||
|
|
||||||
contentType := marshaler.ContentType()
|
|
||||||
// Check marshaler on run time in order to keep backwards compatability
|
|
||||||
// An interface param needs to be added to the ContentType() function on
|
|
||||||
// the Marshal interface to be able to remove this check
|
|
||||||
if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
|
|
||||||
pb := s.Proto()
|
|
||||||
contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
|
|
||||||
}
|
|
||||||
w.Header().Set("Content-Type", contentType)
|
|
||||||
|
|
||||||
buf, merr := marshaler.Marshal(s.Proto())
|
|
||||||
if merr != nil {
|
|
||||||
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
|
|
||||||
w.WriteHeader(http.StatusInternalServerError)
|
|
||||||
if _, err := io.WriteString(w, fallback); err != nil {
|
|
||||||
grpclog.Infof("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
|
||||||
if !ok {
|
|
||||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
|
||||||
handleForwardResponseTrailerHeader(w, md)
|
|
||||||
st := HTTPStatusFromCode(s.Code())
|
|
||||||
w.WriteHeader(st)
|
|
||||||
if _, err := w.Write(buf); err != nil {
|
|
||||||
grpclog.Infof("Failed to write response: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseTrailer(w, md)
|
|
||||||
}
|
|

392 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go (generated, vendored)
@@ -1,392 +0,0 @@
package runtime
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PopulateQueryParameters populates "values" into "msg".
|
|
||||||
// A value is ignored if its key starts with one of the elements in "filter".
|
|
||||||
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
|
||||||
for key, values := range values {
|
|
||||||
re, err := regexp.Compile("^(.*)\\[(.*)\\]$")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
match := re.FindStringSubmatch(key)
|
|
||||||
if len(match) == 3 {
|
|
||||||
key = match[1]
|
|
||||||
values = append([]string{match[2]}, values...)
|
|
||||||
}
|
|
||||||
fieldPath := strings.Split(key, ".")
|
|
||||||
if filter.HasCommonPrefix(fieldPath) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
|
||||||
// It instantiates missing protobuf fields as it goes.
|
|
||||||
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
|
||||||
fieldPath := strings.Split(fieldPathString, ".")
|
|
||||||
return populateFieldValueFromPath(msg, fieldPath, []string{value})
|
|
||||||
}
|
|
||||||
|
|
||||||
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
|
|
||||||
m := reflect.ValueOf(msg)
|
|
||||||
if m.Kind() != reflect.Ptr {
|
|
||||||
return fmt.Errorf("unexpected type %T: %v", msg, msg)
|
|
||||||
}
|
|
||||||
var props *proto.Properties
|
|
||||||
m = m.Elem()
|
|
||||||
for i, fieldName := range fieldPath {
|
|
||||||
isLast := i == len(fieldPath)-1
|
|
||||||
if !isLast && m.Kind() != reflect.Struct {
|
|
||||||
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
|
|
||||||
}
|
|
||||||
var f reflect.Value
|
|
||||||
var err error
|
|
||||||
f, props, err = fieldByProtoName(m, fieldName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if !f.IsValid() {
|
|
||||||
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
switch f.Kind() {
|
|
||||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
|
|
||||||
if !isLast {
|
|
||||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
|
||||||
}
|
|
||||||
m = f
|
|
||||||
case reflect.Slice:
|
|
||||||
if !isLast {
|
|
||||||
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
|
|
||||||
}
|
|
||||||
// Handle []byte
|
|
||||||
if f.Type().Elem().Kind() == reflect.Uint8 {
|
|
||||||
m = f
|
|
||||||
break
|
|
||||||
}
|
|
||||||
return populateRepeatedField(f, values, props)
|
|
||||||
case reflect.Ptr:
|
|
||||||
if f.IsNil() {
|
|
||||||
m = reflect.New(f.Type().Elem())
|
|
||||||
f.Set(m.Convert(f.Type()))
|
|
||||||
}
|
|
||||||
m = f.Elem()
|
|
||||||
continue
|
|
||||||
case reflect.Struct:
|
|
||||||
m = f
|
|
||||||
continue
|
|
||||||
case reflect.Map:
|
|
||||||
if !isLast {
|
|
||||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
|
||||||
}
|
|
||||||
return populateMapField(f, values, props)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
switch len(values) {
|
|
||||||
case 0:
|
|
||||||
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
|
|
||||||
case 1:
|
|
||||||
default:
|
|
||||||
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
|
|
||||||
}
|
|
||||||
return populateField(m, values[0], props)
|
|
||||||
}
|
|
||||||
|
|
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
    props := proto.GetProperties(m.Type())

    // look up field name in oneof map
    if op, ok := props.OneofTypes[name]; ok {
        v := reflect.New(op.Type.Elem())
        field := m.Field(op.Field)
        if !field.IsNil() {
            return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
        }
        field.Set(v)
        return v.Elem().Field(0), op.Prop, nil
    }

    for _, p := range props.Prop {
        if p.OrigName == name {
            return m.FieldByName(p.Name), p, nil
        }
        if p.JSONName == name {
            return m.FieldByName(p.Name), p, nil
        }
    }
    return reflect.Value{}, nil, nil
}

func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
    if len(values) != 2 {
        return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
    }

    key, value := values[0], values[1]
    keyType := f.Type().Key()
    valueType := f.Type().Elem()
    if f.IsNil() {
        f.Set(reflect.MakeMap(f.Type()))
    }

    keyConv, ok := convFromType[keyType.Kind()]
    if !ok {
        return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
    }
    valueConv, ok := convFromType[valueType.Kind()]
    if !ok {
        return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
    }

    keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
    if err := keyV[1].Interface(); err != nil {
        return err.(error)
    }
    valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
    if err := valueV[1].Interface(); err != nil {
        return err.(error)
    }

    f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))

    return nil
}

func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
    elemType := f.Type().Elem()

    // is the destination field a slice of an enumeration type?
    if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
        return populateFieldEnumRepeated(f, values, enumValMap)
    }

    conv, ok := convFromType[elemType.Kind()]
    if !ok {
        return fmt.Errorf("unsupported field type %s", elemType)
    }
    f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
    for i, v := range values {
        result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
        if err := result[1].Interface(); err != nil {
            return err.(error)
        }
        f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
    }
    return nil
}

func populateField(f reflect.Value, value string, props *proto.Properties) error {
    i := f.Addr().Interface()

    // Handle protobuf well known types
    type wkt interface {
        XXX_WellKnownType() string
    }
    if wkt, ok := i.(wkt); ok {
        switch wkt.XXX_WellKnownType() {
        case "Timestamp":
            if value == "null" {
                f.Field(0).SetInt(0)
                f.Field(1).SetInt(0)
                return nil
            }

            t, err := time.Parse(time.RFC3339Nano, value)
            if err != nil {
                return fmt.Errorf("bad Timestamp: %v", err)
            }
            f.Field(0).SetInt(int64(t.Unix()))
            f.Field(1).SetInt(int64(t.Nanosecond()))
            return nil
        case "Duration":
            if value == "null" {
                f.Field(0).SetInt(0)
                f.Field(1).SetInt(0)
                return nil
            }
            d, err := time.ParseDuration(value)
            if err != nil {
                return fmt.Errorf("bad Duration: %v", err)
            }

            ns := d.Nanoseconds()
            s := ns / 1e9
            ns %= 1e9
            f.Field(0).SetInt(s)
            f.Field(1).SetInt(ns)
            return nil
        case "DoubleValue":
            fallthrough
        case "FloatValue":
            float64Val, err := strconv.ParseFloat(value, 64)
            if err != nil {
                return fmt.Errorf("bad DoubleValue: %s", value)
            }
            f.Field(0).SetFloat(float64Val)
            return nil
        case "Int64Value":
            fallthrough
        case "Int32Value":
            int64Val, err := strconv.ParseInt(value, 10, 64)
            if err != nil {
                return fmt.Errorf("bad DoubleValue: %s", value)
            }
            f.Field(0).SetInt(int64Val)
            return nil
        case "UInt64Value":
            fallthrough
        case "UInt32Value":
            uint64Val, err := strconv.ParseUint(value, 10, 64)
            if err != nil {
                return fmt.Errorf("bad DoubleValue: %s", value)
            }
            f.Field(0).SetUint(uint64Val)
            return nil
        case "BoolValue":
            if value == "true" {
                f.Field(0).SetBool(true)
            } else if value == "false" {
                f.Field(0).SetBool(false)
            } else {
                return fmt.Errorf("bad BoolValue: %s", value)
            }
            return nil
        case "StringValue":
            f.Field(0).SetString(value)
            return nil
        case "BytesValue":
            bytesVal, err := base64.StdEncoding.DecodeString(value)
            if err != nil {
                return fmt.Errorf("bad BytesValue: %s", value)
            }
            f.Field(0).SetBytes(bytesVal)
            return nil
        }
    }

    // Handle google well known types
    if gwkt, ok := i.(proto.Message); ok {
        switch proto.MessageName(gwkt) {
        case "google.protobuf.FieldMask":
            p := f.Field(0)
            for _, v := range strings.Split(value, ",") {
                if v != "" {
                    p.Set(reflect.Append(p, reflect.ValueOf(v)))
                }
            }
            return nil
        }
    }

    // Handle Time and Duration stdlib types
    switch t := i.(type) {
    case *time.Time:
        pt, err := time.Parse(time.RFC3339Nano, value)
        if err != nil {
            return fmt.Errorf("bad Timestamp: %v", err)
        }
        *t = pt
        return nil
    case *time.Duration:
        d, err := time.ParseDuration(value)
        if err != nil {
            return fmt.Errorf("bad Duration: %v", err)
        }
        *t = d
        return nil
    }

    // is the destination field an enumeration type?
    if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
        return populateFieldEnum(f, value, enumValMap)
    }

    conv, ok := convFromType[f.Kind()]
    if !ok {
        return fmt.Errorf("field type %T is not supported in query parameters", i)
    }
    result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
    if err := result[1].Interface(); err != nil {
        return err.(error)
    }
    f.Set(result[0].Convert(f.Type()))
    return nil
}

func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
    // see if it's an enumeration string
    if enumVal, ok := enumValMap[value]; ok {
        return reflect.ValueOf(enumVal).Convert(t), nil
    }

    // check for an integer that matches an enumeration value
    eVal, err := strconv.Atoi(value)
    if err != nil {
        return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
    }
    for _, v := range enumValMap {
        if v == int32(eVal) {
            return reflect.ValueOf(eVal).Convert(t), nil
        }
    }
    return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}

func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
    cval, err := convertEnum(value, f.Type(), enumValMap)
    if err != nil {
        return err
    }
    f.Set(cval)
    return nil
}

func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
    elemType := f.Type().Elem()
    f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
    for i, v := range values {
        result, err := convertEnum(v, elemType, enumValMap)
        if err != nil {
            return err
        }
        f.Index(i).Set(result)
    }
    return nil
}

var (
    convFromType = map[reflect.Kind]reflect.Value{
        reflect.String:  reflect.ValueOf(String),
        reflect.Bool:    reflect.ValueOf(Bool),
        reflect.Float64: reflect.ValueOf(Float64),
        reflect.Float32: reflect.ValueOf(Float32),
        reflect.Int64:   reflect.ValueOf(Int64),
        reflect.Int32:   reflect.ValueOf(Int32),
        reflect.Uint64:  reflect.ValueOf(Uint64),
        reflect.Uint32:  reflect.ValueOf(Uint32),
        reflect.Slice:   reflect.ValueOf(Bytes),
    }
)
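The deleted helpers above drive grpc-gateway's query-parameter decoding: convFromType maps a reflect.Kind to a string-to-value converter, and populateField invokes that converter via reflection and Convert()s the result into the destination field. A minimal, self-contained sketch of the same pattern (plain Go, not grpc-gateway code; the struct and converter below are illustrative only):

package main

import (
    "fmt"
    "reflect"
    "strconv"
)

// toInt64 plays the role of one entry in a convFromType-style converter map.
func toInt64(s string) (int64, error) { return strconv.ParseInt(s, 10, 64) }

func main() {
    var dst struct{ Count int64 }
    f := reflect.ValueOf(&dst).Elem().FieldByName("Count")

    // Call the converter through reflection, check the returned error,
    // then Convert the value into the destination field's type.
    conv := reflect.ValueOf(toInt64)
    out := conv.Call([]reflect.Value{reflect.ValueOf("42")})
    if err := out[1].Interface(); err != nil {
        panic(err.(error))
    }
    f.Set(out[0].Convert(f.Type()))
    fmt.Println(dst.Count) // expected to print 42
}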
21 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel generated vendored
@@ -1,21 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "pattern.go",
        "readerfactory.go",
        "trie.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["trie_test.go"],
    embed = [":go_default_library"],
)
2 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go generated vendored
@@ -1,2 +0,0 @@
// Package utilities provides members for internal use in grpc-gateway.
package utilities
22 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go generated vendored
@@ -1,22 +0,0 @@
package utilities

// An OpCode is a opcode of compiled path patterns.
type OpCode int

// These constants are the valid values of OpCode.
const (
    // OpNop does nothing
    OpNop = OpCode(iota)
    // OpPush pushes a component to stack
    OpPush
    // OpLitPush pushes a component to stack if it matches to the literal
    OpLitPush
    // OpPushM concatenates the remaining components and pushes it to stack
    OpPushM
    // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
    OpConcatN
    // OpCapture pops an item and binds it to the variable
    OpCapture
    // OpEnd is the least positive invalid opcode.
    OpEnd
)
20 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go generated vendored
@@ -1,20 +0,0 @@
package utilities

import (
    "bytes"
    "io"
    "io/ioutil"
)

// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
    b, err := ioutil.ReadAll(r)
    if err != nil {
        return nil, err
    }

    return func() io.Reader {
        return bytes.NewReader(b)
    }, nil
}
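IOReaderFactory buffers the stream once so callers can replay it from the start. A small usage sketch under that assumption (the import path matches the vendored package shown above; the printed output in the comment is the expected behavior, not verified here):

package main

import (
    "fmt"
    "io/ioutil"
    "strings"

    "github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
    newReader, err := utilities.IOReaderFactory(strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    // Each call yields a fresh reader over the same buffered bytes.
    a, _ := ioutil.ReadAll(newReader())
    b, _ := ioutil.ReadAll(newReader())
    fmt.Println(string(a), string(b)) // expected: hello hello
}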
177 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go generated vendored
@@ -1,177 +0,0 @@
package utilities

import (
    "sort"
)

// DoubleArray is a Double Array implementation of trie on sequences of strings.
type DoubleArray struct {
    // Encoding keeps an encoding from string to int
    Encoding map[string]int
    // Base is the base array of Double Array
    Base []int
    // Check is the check array of Double Array
    Check []int
}

// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
func NewDoubleArray(seqs [][]string) *DoubleArray {
    da := &DoubleArray{Encoding: make(map[string]int)}
    if len(seqs) == 0 {
        return da
    }

    encoded := registerTokens(da, seqs)
    sort.Sort(byLex(encoded))

    root := node{row: -1, col: -1, left: 0, right: len(encoded)}
    addSeqs(da, encoded, 0, root)

    for i := len(da.Base); i > 0; i-- {
        if da.Check[i-1] != 0 {
            da.Base = da.Base[:i]
            da.Check = da.Check[:i]
            break
        }
    }
    return da
}

func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
    var result [][]int
    for _, seq := range seqs {
        var encoded []int
        for _, token := range seq {
            if _, ok := da.Encoding[token]; !ok {
                da.Encoding[token] = len(da.Encoding)
            }
            encoded = append(encoded, da.Encoding[token])
        }
        result = append(result, encoded)
    }
    for i := range result {
        result[i] = append(result[i], len(da.Encoding))
    }
    return result
}

type node struct {
    row, col    int
    left, right int
}

func (n node) value(seqs [][]int) int {
    return seqs[n.row][n.col]
}

func (n node) children(seqs [][]int) []*node {
    var result []*node
    lastVal := int(-1)
    last := new(node)
    for i := n.left; i < n.right; i++ {
        if lastVal == seqs[i][n.col+1] {
            continue
        }
        last.right = i
        last = &node{
            row:  i,
            col:  n.col + 1,
            left: i,
        }
        result = append(result, last)
    }
    last.right = n.right
    return result
}

func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
    ensureSize(da, pos)

    children := n.children(seqs)
    var i int
    for i = 1; ; i++ {
        ok := func() bool {
            for _, child := range children {
                code := child.value(seqs)
                j := i + code
                ensureSize(da, j)
                if da.Check[j] != 0 {
                    return false
                }
            }
            return true
        }()
        if ok {
            break
        }
    }
    da.Base[pos] = i
    for _, child := range children {
        code := child.value(seqs)
        j := i + code
        da.Check[j] = pos + 1
    }
    terminator := len(da.Encoding)
    for _, child := range children {
        code := child.value(seqs)
        if code == terminator {
            continue
        }
        j := i + code
        addSeqs(da, seqs, j, *child)
    }
}

func ensureSize(da *DoubleArray, i int) {
    for i >= len(da.Base) {
        da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
        da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
    }
}

type byLex [][]int

func (l byLex) Len() int           { return len(l) }
func (l byLex) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l byLex) Less(i, j int) bool {
    si := l[i]
    sj := l[j]
    var k int
    for k = 0; k < len(si) && k < len(sj); k++ {
        if si[k] < sj[k] {
            return true
        }
        if si[k] > sj[k] {
            return false
        }
    }
    if k < len(sj) {
        return true
    }
    return false
}

// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
    if len(da.Base) == 0 {
        return false
    }

    var i int
    for _, t := range seq {
        code, ok := da.Encoding[t]
        if !ok {
            break
        }
        j := da.Base[i] + code
        if len(da.Check) <= j || da.Check[j] != i+1 {
            break
        }
        i = j
    }
    j := da.Base[i] + len(da.Encoding)
    if len(da.Check) <= j || da.Check[j] != i+1 {
        return false
    }
    return true
}
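NewDoubleArray and HasCommonPrefix together answer prefix queries over registered string sequences. A brief sketch of the expected behavior (illustrative values; the outputs in the comments follow from the code above but are not verified here):

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
    // Register the single sequence {"a", "b"}; HasCommonPrefix reports whether
    // any registered sequence is a prefix of the query sequence.
    da := utilities.NewDoubleArray([][]string{{"a", "b"}})
    fmt.Println(da.HasCommonPrefix([]string{"a", "b", "c"})) // expected: true  ({"a","b"} is a prefix)
    fmt.Println(da.HasCommonPrefix([]string{"a"}))           // expected: false (no registered sequence is a prefix)
}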
9868 vendor/github.com/minio/minio-go/functional_tests.go generated vendored
File diff suppressed because it is too large
56 vendor/go.opencensus.io/plugin/ocgrpc/client.go generated vendored
@@ -1,56 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocgrpc

import (
    "go.opencensus.io/trace"
    "golang.org/x/net/context"

    "google.golang.org/grpc/stats"
)

// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and
// traces. Use with gRPC clients only.
type ClientHandler struct {
    // StartOptions allows configuring the StartOptions used to create new spans.
    //
    // StartOptions.SpanKind will always be set to trace.SpanKindClient
    // for spans started by this handler.
    StartOptions trace.StartOptions
}

// HandleConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
    // no-op
}

// TagConn exists to satisfy gRPC stats.Handler.
func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
    // no-op
    return ctx
}

// HandleRPC implements per-RPC tracing and stats instrumentation.
func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
    traceHandleRPC(ctx, rs)
    statsHandleRPC(ctx, rs)
}

// TagRPC implements per-RPC context management.
func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    ctx = c.traceTagRPC(ctx, rti)
    ctx = c.statsTagRPC(ctx, rti)
    return ctx
}
107 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go generated vendored
@@ -1,107 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
)

// The following variables are measures are recorded by ClientHandler:
var (
    ClientSentMessagesPerRPC     = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
    ClientSentBytesPerRPC        = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes)
    ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
    ClientReceivedBytesPerRPC    = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
    ClientRoundtripLatency       = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
    ClientServerLatency          = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)

// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
    ClientSentBytesPerRPCView = &view.View{
        Measure:     ClientSentBytesPerRPC,
        Name:        "grpc.io/client/sent_bytes_per_rpc",
        Description: "Distribution of bytes sent per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ClientReceivedBytesPerRPCView = &view.View{
        Measure:     ClientReceivedBytesPerRPC,
        Name:        "grpc.io/client/received_bytes_per_rpc",
        Description: "Distribution of bytes received per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ClientRoundtripLatencyView = &view.View{
        Measure:     ClientRoundtripLatency,
        Name:        "grpc.io/client/roundtrip_latency",
        Description: "Distribution of round-trip latency, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMillisecondsDistribution,
    }

    ClientCompletedRPCsView = &view.View{
        Measure:     ClientRoundtripLatency,
        Name:        "grpc.io/client/completed_rpcs",
        Description: "Count of RPCs by method and status.",
        TagKeys:     []tag.Key{KeyClientMethod, KeyClientStatus},
        Aggregation: view.Count(),
    }

    ClientSentMessagesPerRPCView = &view.View{
        Measure:     ClientSentMessagesPerRPC,
        Name:        "grpc.io/client/sent_messages_per_rpc",
        Description: "Distribution of sent messages count per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMessageCountDistribution,
    }

    ClientReceivedMessagesPerRPCView = &view.View{
        Measure:     ClientReceivedMessagesPerRPC,
        Name:        "grpc.io/client/received_messages_per_rpc",
        Description: "Distribution of received messages count per RPC, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMessageCountDistribution,
    }

    ClientServerLatencyView = &view.View{
        Measure:     ClientServerLatency,
        Name:        "grpc.io/client/server_latency",
        Description: "Distribution of server latency as viewed by client, by method.",
        TagKeys:     []tag.Key{KeyClientMethod},
        Aggregation: DefaultMillisecondsDistribution,
    }
)

// DefaultClientViews are the default client views provided by this package.
var DefaultClientViews = []*view.View{
    ClientSentBytesPerRPCView,
    ClientReceivedBytesPerRPCView,
    ClientRoundtripLatencyView,
    ClientCompletedRPCsView,
}

// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count.
// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.
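As the comments above note, none of these views are registered by default; the measures are only recorded once the views are registered and the handler is attached to a connection. A minimal client-side sketch, assuming the standard opencensus-go and grpc-go APIs of this era (the target address is a placeholder):

package main

import (
    "log"

    "go.opencensus.io/plugin/ocgrpc"
    "go.opencensus.io/stats/view"
    "google.golang.org/grpc"
)

func main() {
    // Opt in to the default client views declared above.
    if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
        log.Fatal(err)
    }
    // Attach the handler so each outgoing RPC records the client measures.
    conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(),
        grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
}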
49 vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go generated vendored
@@ -1,49 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "time"

    "go.opencensus.io/tag"
    "golang.org/x/net/context"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
)

// statsTagRPC gets the tag.Map populated by the application code, serializes
// its tags into the GRPC metadata in order to be sent to the server.
func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    startTime := time.Now()
    if info == nil {
        if grpclog.V(2) {
            grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
        }
        return ctx
    }

    d := &rpcData{
        startTime: startTime,
        method:    info.FullMethodName,
    }
    ts := tag.FromContext(ctx)
    if ts != nil {
        encoded := tag.Encode(ts)
        ctx = stats.SetTags(ctx, encoded)
    }

    return context.WithValue(ctx, rpcDataKey, d)
}
19 vendor/go.opencensus.io/plugin/ocgrpc/doc.go generated vendored
@@ -1,19 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package ocgrpc contains OpenCensus stats and trace
// integrations for gRPC.
//
// Use ServerHandler for servers and ClientHandler for clients.
package ocgrpc // import "go.opencensus.io/plugin/ocgrpc"
80 vendor/go.opencensus.io/plugin/ocgrpc/server.go generated vendored
@@ -1,80 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocgrpc

import (
    "go.opencensus.io/trace"
    "golang.org/x/net/context"

    "google.golang.org/grpc/stats"
)

// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and
// traces. Use with gRPC servers.
//
// When installed (see Example), tracing metadata is read from inbound RPCs
// by default. If no tracing metadata is present, or if the tracing metadata is
// present but the SpanContext isn't sampled, then a new trace may be started
// (as determined by Sampler).
type ServerHandler struct {
    // IsPublicEndpoint may be set to true to always start a new trace around
    // each RPC. Any SpanContext in the RPC metadata will be added as a linked
    // span instead of making it the parent of the span created around the
    // server RPC.
    //
    // Be aware that if you leave this false (the default) on a public-facing
    // server, callers will be able to send tracing metadata in gRPC headers
    // and trigger traces in your backend.
    IsPublicEndpoint bool

    // StartOptions to use for to spans started around RPCs handled by this server.
    //
    // These will apply even if there is tracing metadata already
    // present on the inbound RPC but the SpanContext is not sampled. This
    // ensures that each service has some opportunity to be traced. If you would
    // like to not add any additional traces for this gRPC service, set:
    //
    //   StartOptions.Sampler = trace.ProbabilitySampler(0.0)
    //
    // StartOptions.SpanKind will always be set to trace.SpanKindServer
    // for spans started by this handler.
    StartOptions trace.StartOptions
}

var _ stats.Handler = (*ServerHandler)(nil)

// HandleConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) {
    // no-op
}

// TagConn exists to satisfy gRPC stats.Handler.
func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
    // no-op
    return ctx
}

// HandleRPC implements per-RPC tracing and stats instrumentation.
func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
    traceHandleRPC(ctx, rs)
    statsHandleRPC(ctx, rs)
}

// TagRPC implements per-RPC context management.
func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context {
    ctx = s.traceTagRPC(ctx, rti)
    ctx = s.statsTagRPC(ctx, rti)
    return ctx
}
97 vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go generated vendored
@@ -1,97 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
)

// The following variables are measures are recorded by ServerHandler:
var (
    ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
    ServerReceivedBytesPerRPC    = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
    ServerSentMessagesPerRPC     = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
    ServerSentBytesPerRPC        = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
    ServerLatency                = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
)

// TODO(acetechnologist): This is temporary and will need to be replaced by a
// mechanism to load these defaults from a common repository/config shared by
// all supported languages. Likely a serialized protobuf of these defaults.

// Predefined views may be registered to collect data for the above measures.
// As always, you may also define your own custom views over measures collected by this
// package. These are declared as a convenience only; none are registered by
// default.
var (
    ServerReceivedBytesPerRPCView = &view.View{
        Name:        "grpc.io/server/received_bytes_per_rpc",
        Description: "Distribution of received bytes per RPC, by method.",
        Measure:     ServerReceivedBytesPerRPC,
        TagKeys:     []tag.Key{KeyServerMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ServerSentBytesPerRPCView = &view.View{
        Name:        "grpc.io/server/sent_bytes_per_rpc",
        Description: "Distribution of total sent bytes per RPC, by method.",
        Measure:     ServerSentBytesPerRPC,
        TagKeys:     []tag.Key{KeyServerMethod},
        Aggregation: DefaultBytesDistribution,
    }

    ServerLatencyView = &view.View{
        Name:        "grpc.io/server/server_latency",
        Description: "Distribution of server latency in milliseconds, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerLatency,
        Aggregation: DefaultMillisecondsDistribution,
    }

    ServerCompletedRPCsView = &view.View{
        Name:        "grpc.io/server/completed_rpcs",
        Description: "Count of RPCs by method and status.",
        TagKeys:     []tag.Key{KeyServerMethod, KeyServerStatus},
        Measure:     ServerLatency,
        Aggregation: view.Count(),
    }

    ServerReceivedMessagesPerRPCView = &view.View{
        Name:        "grpc.io/server/received_messages_per_rpc",
        Description: "Distribution of messages received count per RPC, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerReceivedMessagesPerRPC,
        Aggregation: DefaultMessageCountDistribution,
    }

    ServerSentMessagesPerRPCView = &view.View{
        Name:        "grpc.io/server/sent_messages_per_rpc",
        Description: "Distribution of messages sent count per RPC, by method.",
        TagKeys:     []tag.Key{KeyServerMethod},
        Measure:     ServerSentMessagesPerRPC,
        Aggregation: DefaultMessageCountDistribution,
    }
)

// DefaultServerViews are the default server views provided by this package.
var DefaultServerViews = []*view.View{
    ServerReceivedBytesPerRPCView,
    ServerSentBytesPerRPCView,
    ServerLatencyView,
    ServerCompletedRPCsView,
}
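The server-side counterpart mirrors the client setup: register the default server views and attach ServerHandler when constructing the server. A minimal sketch under the same API assumptions (the listen address is a placeholder):

package main

import (
    "log"
    "net"

    "go.opencensus.io/plugin/ocgrpc"
    "go.opencensus.io/stats/view"
    "google.golang.org/grpc"
)

func main() {
    if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
        log.Fatal(err)
    }
    // The stats handler records the server measures for every handled RPC.
    srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatal(err)
    }
    log.Fatal(srv.Serve(lis))
}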
63 vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go generated vendored
@@ -1,63 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "time"

    "golang.org/x/net/context"

    "go.opencensus.io/tag"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
)

// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from
// it and creates a new tag.Map and puts them into the returned context.
func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
    startTime := time.Now()
    if info == nil {
        if grpclog.V(2) {
            grpclog.Infof("opencensus: TagRPC called with nil info.")
        }
        return ctx
    }
    d := &rpcData{
        startTime: startTime,
        method:    info.FullMethodName,
    }
    propagated := h.extractPropagatedTags(ctx)
    ctx = tag.NewContext(ctx, propagated)
    ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
    return context.WithValue(ctx, rpcDataKey, d)
}

// extractPropagatedTags creates a new tag map containing the tags extracted from the
// gRPC metadata.
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
    buf := stats.Tags(ctx)
    if buf == nil {
        return nil
    }
    propagated, err := tag.Decode(buf)
    if err != nil {
        if grpclog.V(2) {
            grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err)
        }
        return nil
    }
    return propagated
}
208 vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go generated vendored
@@ -1,208 +0,0 @@
// Copyright 2017, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package ocgrpc

import (
    "context"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    ocstats "go.opencensus.io/stats"
    "go.opencensus.io/stats/view"
    "go.opencensus.io/tag"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/stats"
    "google.golang.org/grpc/status"
)

type grpcInstrumentationKey string

// rpcData holds the instrumentation RPC data that is needed between the start
// and end of an call. It holds the info that this package needs to keep track
// of between the various GRPC events.
type rpcData struct {
    // reqCount and respCount has to be the first words
    // in order to be 64-aligned on 32-bit architectures.
    sentCount, sentBytes, recvCount, recvBytes int64 // access atomically

    // startTime represents the time at which TagRPC was invoked at the
    // beginning of an RPC. It is an appoximation of the time when the
    // application code invoked GRPC code.
    startTime time.Time
    method    string
}

// The following variables define the default hard-coded auxiliary data used by
// both the default GRPC client and GRPC server metrics.
var (
    DefaultBytesDistribution        = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
    DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
    DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)

// Server tags are applied to the context used to process each RPC, as well as
// the measures at the end of each RPC.
var (
    KeyServerMethod, _ = tag.NewKey("grpc_server_method")
    KeyServerStatus, _ = tag.NewKey("grpc_server_status")
)

// Client tags are applied to measures at the end of each RPC.
var (
    KeyClientMethod, _ = tag.NewKey("grpc_client_method")
    KeyClientStatus, _ = tag.NewKey("grpc_client_status")
)

var (
    rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
)

func methodName(fullname string) string {
    return strings.TrimLeft(fullname, "/")
}

// statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
    switch st := s.(type) {
    case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
        // do nothing for client
    case *stats.OutPayload:
        handleRPCOutPayload(ctx, st)
    case *stats.InPayload:
        handleRPCInPayload(ctx, st)
    case *stats.End:
        handleRPCEnd(ctx, st)
    default:
        grpclog.Infof("unexpected stats: %T", st)
    }
}

func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    atomic.AddInt64(&d.sentBytes, int64(s.Length))
    atomic.AddInt64(&d.sentCount, 1)
}

func handleRPCInPayload(ctx context.Context, s *stats.InPayload) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    atomic.AddInt64(&d.recvBytes, int64(s.Length))
    atomic.AddInt64(&d.recvCount, 1)
}

func handleRPCEnd(ctx context.Context, s *stats.End) {
    d, ok := ctx.Value(rpcDataKey).(*rpcData)
    if !ok {
        if grpclog.V(2) {
            grpclog.Infoln("Failed to retrieve *rpcData from context.")
        }
        return
    }

    elapsedTime := time.Since(d.startTime)

    var st string
    if s.Error != nil {
        s, ok := status.FromError(s.Error)
        if ok {
            st = statusCodeToString(s)
        }
    } else {
        st = "OK"
    }

    latencyMillis := float64(elapsedTime) / float64(time.Millisecond)
    if s.Client {
        ocstats.RecordWithTags(ctx,
            []tag.Mutator{
                tag.Upsert(KeyClientMethod, methodName(d.method)),
                tag.Upsert(KeyClientStatus, st),
            },
            ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
            ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
            ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
            ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
            ClientRoundtripLatency.M(latencyMillis))
    } else {
        ocstats.RecordWithTags(ctx,
            []tag.Mutator{
                tag.Upsert(KeyServerStatus, st),
            },
            ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)),
            ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)),
            ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)),
            ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)),
            ServerLatency.M(latencyMillis))
    }
}

func statusCodeToString(s *status.Status) string {
    // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
    switch c := s.Code(); c {
    case codes.OK:
        return "OK"
    case codes.Canceled:
        return "CANCELLED"
    case codes.Unknown:
        return "UNKNOWN"
    case codes.InvalidArgument:
        return "INVALID_ARGUMENT"
    case codes.DeadlineExceeded:
        return "DEADLINE_EXCEEDED"
    case codes.NotFound:
        return "NOT_FOUND"
    case codes.AlreadyExists:
        return "ALREADY_EXISTS"
    case codes.PermissionDenied:
        return "PERMISSION_DENIED"
    case codes.ResourceExhausted:
        return "RESOURCE_EXHAUSTED"
    case codes.FailedPrecondition:
        return "FAILED_PRECONDITION"
    case codes.Aborted:
        return "ABORTED"
    case codes.OutOfRange:
        return "OUT_OF_RANGE"
    case codes.Unimplemented:
        return "UNIMPLEMENTED"
    case codes.Internal:
        return "INTERNAL"
    case codes.Unavailable:
        return "UNAVAILABLE"
    case codes.DataLoss:
        return "DATA_LOSS"
    case codes.Unauthenticated:
        return "UNAUTHENTICATED"
    default:
        return "CODE_" + strconv.FormatInt(int64(c), 10)
    }
}
Some files were not shown because too many files have changed in this diff.